Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits: 68 commits, v0.10.0 ... release-1.

| Author | SHA1 | Date |
|---|---|---|
| | de83b249e9 | |
| | 4ff58fb3f8 | |
| | ca865534ee | |
| | 4bf0551d8f | |
| | be0c7a2a76 | |
| | ff04e7bd55 | |
| | 508f1f50e5 | |
| | f44b204e16 | |
| | 2020efb7ed | |
| | c01cfcf3b6 | |
| | 643cd472ef | |
| | 668d727fc2 | |
| | 423ee35846 | |
| | 31c7855212 | |
| | 211f3942b6 | |
| | beae282735 | |
| | 635348efb9 | |
| | fa335c782f | |
| | b019a58525 | |
| | 78eef6c343 | |
| | cbcefb5d2f | |
| | 149085fb57 | |
| | 991eddb691 | |
| | 91de471376 | |
| | c2d7e22749 | |
| | e7c42794a0 | |
| | 3a8dfc07ed | |
| | 077b7f6505 | |
| | 240fa93bc5 | |
| | 6c7f846917 | |
| | 6db7c3b92c | |
| | 030267107a | |
| | 1c300a9881 | |
| | 0e9b33b822 | |
| | 36e3d1e703 | |
| | f53264b613 | |
| | 414554ae5e | |
| | 150f945592 | |
| | 9a84afece1 | |
| | e0c101c5ae | |
| | e3a562aea0 | |
| | 4966e8ee08 | |
| | d3542d5892 | |
| | 62d04b0fc7 | |
| | 6ecbc85448 | |
| | b9b1eae6fb | |
| | e95e42930d | |
| | 3cabb69014 | |
| | ad8f90f177 | |
| | 8a62cf1699 | |
| | a6b54dae99 | |
| | 112684bcb9 | |
| | 682e07c3cd | |
| | 9593ce16d9 | |
| | 2545c8b031 | |
| | c9793e7029 | |
| | 4757132452 | |
| | 561b3b67b3 | |
| | 566f33e6ad | |
| | 83a75bac80 | |
| | d8772f5685 | |
| | df510187d6 | |
| | 137b9b72e7 | |
| | 09b4979673 | |
| | eff8185d7c | |
| | 83ee94dd08 | |
| | aae52ac2ee | |
| | a7c4295c58 | |

4 .gitignore (vendored)
@@ -1 +1,5 @@
_output/
_tmp/
vendordiff.patch
.idea/
*.code-workspace

31 .travis.yml
@@ -1,31 +0,0 @@
sudo: false

language: go

go:
- 1.13.x
env:
- K8S_VERSION=v1.17.0
- K8S_VERSION=v1.16.4
- K8S_VERSION=v1.15.7
services:
- docker
before_script:
- curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
- wget https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64
- chmod +x kind-linux-amd64
- mv kind-linux-amd64 kind
- export PATH=$PATH:$PWD
- kind create cluster --image kindest/node:${K8S_VERSION} --config=$TRAVIS_BUILD_DIR/hack/kind_config.yaml
- export KUBECONFIG="$(kind get kubeconfig-path)"
- docker pull kubernetes/pause
- kind load docker-image kubernetes/pause
- kind get kubeconfig > /tmp/admin.conf
script:
- mkdir -p ~/gopath/src/sigs.k8s.io/
- mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
- hack/verify-gofmt.sh
- make lint
- make build
- make test-unit
- make test-e2e

@@ -10,7 +10,7 @@ We have full documentation on how to get started contributing here:

- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet/README.md) - Common resources for existing developers

## Mentorship

@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.13.6
FROM golang:1.13.9

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -19,7 +19,7 @@ RUN make

FROM scratch

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>

COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler

@@ -13,7 +13,7 @@
# limitations under the License.
FROM scratch

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>
MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>

COPY _output/bin/descheduler /bin/descheduler

12 Makefile
@@ -62,6 +62,14 @@ push: push-container-to-gcloud
clean:
	rm -rf _output

verify: verify-gofmt verify-vendor lint

verify-gofmt:
	./hack/verify-gofmt.sh

verify-vendor:
	./hack/verify-vendor.sh

test-unit:
	./test/run-unit-tests.sh

@@ -76,6 +84,6 @@ gen:
	go mod tidy
lint:
ifndef HAS_GOLANGCI
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin ${GOLANGCI_VERSION}
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
	golangci-lint run
	./_output/bin/golangci-lint run

1 OWNERS
@@ -2,6 +2,7 @@ approvers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
reviewers:
- aveshagarwal
- k82cn

229 README.md
@@ -1,4 +1,3 @@
[](https://travis-ci.org/kubernetes-sigs/descheduler)
[](https://goreportcard.com/report/sigs.k8s.io/descheduler)

# Descheduler for Kubernetes
@@ -9,8 +8,8 @@ Scheduling in Kubernetes is the process of binding pending pods to nodes, and is
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
rules, called predicates and priorities. The scheduler's decisions are influenced by its view of
a Kubernetes cluster at that point of time when a new pod appears first time for scheduling.
As Kubernetes clusters are very dynamic and their state change over time, there may be desired
a Kubernetes cluster at that point of time when a new pod appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, there may be desire
to move already running pods to some other nodes for various reasons:

* Some nodes are under or over utilized.
@@ -24,78 +23,48 @@ Descheduler, based on its policy, finds pods that can be moved and evicts them.
note, in current implementation, descheduler does not schedule replacement of evicted pods
but relies on the default scheduler for that.

## Build and Run
## Quick Start

- Checkout the repo into your $GOPATH directory under src/sigs.k8s.io/descheduler
The descheduler can be run as a Job or CronJob inside of a k8s cluster. It has the
advantage of being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
being evicted by itself or by the kubelet.

Build descheduler:

```sh
$ make
```

and run descheduler:

```sh
$ ./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file>
```

If you want more information about what descheduler is doing add `-v 1` to the command line

For more information about available options run:
```
$ ./_output/bin/descheduler --help
```

## Running Descheduler as a Job or CronJob

The descheduler can be run as a job or cronjob inside of a pod. It has the advantage of
being able to be run multiple times without needing user intervention.
The descheduler pod is run as a critical pod to avoid being evicted by itself,
or by the kubelet due to an eviction event. Since critical pods are created in the
`kube-system` namespace, the descheduler job and its pod will also be created
in `kube-system` namespace.

### Setup RBAC

To give necessary permissions for the descheduler to work in a pod.
### Run As A Job

```
$ kubectl create -f kubernetes/rbac.yaml
kubectl create -f kubernetes/rbac.yaml
kubectl create -f kubernetes/configmap.yaml
kubectl create -f kubernetes/job.yaml
```

### Create a configmap to store descheduler policy
### Run As A CronJob

```
$ kubectl create -f kubernetes/configmap.yaml
kubectl create -f kubernetes/rbac.yaml
kubectl create -f kubernetes/configmap.yaml
kubectl create -f kubernetes/cronjob.yaml
```

### Create a Job or CronJob
## User Guide

As a Job.
```
$ kubectl create -f kubernetes/job.yaml
```

Or as a CronJob.
```
$ kubectl create -f kubernetes/cronjob.yaml
```
See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy and Strategies

Descheduler's policy is configurable and includes strategies to be enabled or disabled.
Five strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`, `RemovePodsViolatingNodeAffinity` , `RemovePodsViolatingNodeTaints` are currently implemented.
As part of the policy, the parameters associated with the strategies can be configured too.
Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
Seven strategies `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`,
`RemovePodsViolatingNodeAffinity`, `RemovePodsViolatingNodeTaints`, `RemovePodsHavingTooManyRestarts`, and `PodLifeTime`
are currently implemented. As part of the policy, the parameters associated with the strategies can be configured too.
By default, all strategies are enabled.
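
As an added sketch (not part of the upstream change), every strategy is toggled the same way in the policy file; any of the seven names above can appear under `strategies` with an `enabled` flag:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
  "PodLifeTime":
    enabled: false
```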

### RemoveDuplicates

This strategy makes sure that there is only one pod associated with a Replica Set (RS),
Replication Controller (RC), Deployment, or Job running on same node. If there are more,
Replication Controller (RC), Deployment, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
if some nodes went down due to whatever reasons, and pods on them were moved to other nodes leading to
more than one pod associated with RS or RC, for example, running on same node. Once the failed nodes
more than one pod associated with a RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods. Currently, there are no
parameters associated with this strategy. To disable this strategy, the policy should look like:
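
The example itself falls outside this hunk; as an added sketch in the same DeschedulerPolicy format used throughout this README, disabling it would be:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: false
```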

@@ -113,10 +82,10 @@ This strategy finds nodes that are under utilized and evicts pods, if possible,
in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, and number of pods in terms of percentage. If a node's
usage is below threshold for all (cpu, memory, and number of pods), the node is considered underutilized.
Currently, pods' request resource requirements are considered for computing node resource utilization.
Currently, pods request resource requirements are considered for computing node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. Any node, between the thresholds, `thresholds` and `targetThresholds` is
@@ -124,7 +93,7 @@ considered appropriately utilized and is not considered for eviction. The thresh
can be configured for cpu, memory, and number of pods too in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements.
An example of the policy for this strategy would look like:
Here is an example of a policy for this strategy:

```
apiVersion: "descheduler/v1alpha1"
@@ -144,14 +113,19 @@ strategies:
        "pods": 50
```

There is another parameter associated with `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when number of under utilized nodes
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
are above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
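
As an added sketch (not part of the upstream change), `numberOfNodes` sits next to `thresholds` and `targetThresholds` under `nodeResourceUtilizationThresholds` (its JSON tag in the API types later in this compare is `numberOfNodes`); the threshold values and the 3 below are illustrative only:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
        numberOfNodes: 3
```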

### RemovePodsViolatingInterPodAntiAffinity

This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example, if there is podA on node and podB and podC(running on same node) have antiaffinity rules which prohibit them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This issue could happen, when the anti-affinity rules for pods B,C are created when they are already running on node. Currently, there are no parameters associated with this strategy. To disable this strategy, the policy should look like:
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
if there is podA on a node and podB and podC (running on the same node) have anti-affinity rules which prohibit
them to run on the same node, then podA will be evicted from the node so that podB and podC could run. This
issue could happen, when the anti-affinity rules for podB and podC are created when they are already running on
node. Currently, there are no parameters associated with this strategy. To disable this strategy, the
policy should look like:

```
apiVersion: "descheduler/v1alpha1"
@@ -163,7 +137,23 @@ strategies:

### RemovePodsViolatingNodeAffinity

This strategy makes sure that pods violating node affinity are removed from nodes. For example, there is podA that was scheduled on nodeA which satisfied the node affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time of scheduling, but over time nodeA no longer satisfies the rule, then if another node nodeB is available that satisfies the node affinity rule, then podA will be evicted from nodeA. The policy file should like this -
This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod but kubelet to ignore
in case node changes over time and no longer respects the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution` and evicts pod for kubelet
that no longer respects node affinity.

For example, there is podA scheduled on nodeA which satisfies the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time nodeA stops to satisfy the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.

The policy file should look like:

```
apiVersion: "descheduler/v1alpha1"
@@ -175,9 +165,13 @@ strategies:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```

### RemovePodsViolatingNodeTaints

This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example: there is a pod "podA" with toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations and will be evicted. The policy file should look like:
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example there is a
pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` scheduled and running on the tainted
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
and will be evicted. The policy file should look like:

````
apiVersion: "descheduler/v1alpha1"
@@ -186,52 +180,111 @@ strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
````

### RemovePodsHavingTooManyRestarts

This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes.

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100
        includingInitContainers: true
```

### PodLifeTime

This strategy evicts pods that are older than `.strategies.PodLifeTime.params.maxPodLifeTimeSeconds` The policy
file should look like:

````
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 86400
````

## Pod Evictions

When the descheduler decides to evict pods from a node, it employs following general mechanism:
When the descheduler decides to evict pods from a node, it employs the following general mechanism:

* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted.
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Jobs are
* Pods (static or mirrored pods or stand alone pods) not part of an RC, RS, Deployment or Job are
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted.
* Best efforts pods are evicted before Burstable and Guaranteed pods.
* All types of pods with annotation descheduler.alpha.kubernetes.io/evict are evicted. This
annotation is used to override checks which prevent eviction and user can select which pod is evicted.
User should know how and if the pod will be recreated.
* Best efforts pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation descheduler.alpha.kubernetes.io/evict are evicted. This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
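
As an added sketch (not part of the upstream change), opting a pod into eviction with that annotation looks like the fragment below; the value "true" is an assumption, the text above only states that the annotation key overrides the eviction checks:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-app                                    # hypothetical pod name
  annotations:
    descheduler.alpha.kubernetes.io/evict: "true" # value assumed for illustration
spec:
  containers:
  - name: app
    image: nginx                                  # placeholder image
```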

### Pod disruption Budget (PDB)
Pods subject to Pod Disruption Budget (PDB) are not evicted if descheduling violates its pod
disruption budget (PDB). The pods are evicted by using eviction subresource to handle PDB.
### Pod Disruption Budget (PDB)

Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
are evicted by using the eviction subresource to handle PDB.
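
As an added sketch (not part of the upstream change), a PDB that would constrain descheduler evictions for an application could look like this; the apiVersion, names, and labels are assumptions for illustration:

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: my-app-pdb          # hypothetical name
spec:
  minAvailable: 2           # keep at least two replicas available during evictions
  selector:
    matchLabels:
      app: my-app           # hypothetical label
```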

## Compatibility Matrix

Descheduler | supported Kubernetes version
-------------|-----------------------------
v0.10 | v1.17
v0.4-v0.9 | v1.9+
v0.1-v0.3 | v1.7-v1.8


## Getting Involved and Contributing

Are you interested in contributing to descheduler? We, the
maintainers and community, would love your suggestions, contributions, and help!
Also, the maintainers can be contacted at any time to learn more about how to get
involved.

To get started writing code see the [contributor guide](docs/contributor-guide.md) in the `/docs` directory.

In the interest of getting more new people involved we tag issues with
[`good first issue`][good_first_issue].
These are typically issues that have smaller scope but are good ways to start
to get acquainted with the codebase.

We also encourage ALL active community participants to act as if they are
maintainers, even if you don't have "official" write permissions. This is a
community effort, we are here to serve the Kubernetes community. If you have an
active interest and you want to get involved, you have real power! Don't assume
that the only people who can get things done around here are the "maintainers".

We also would love to add more "official" maintainers, so show us what you can
do!

This repository uses the Kubernetes bots. See a full list of the commands [here][prow].

### Communicating With Contributors

You can reach the contributors of this project at:

- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)

Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).

## Roadmap

This roadmap is not in any particular order.

* Consideration of pod affinity
* Strategy to consider pod life time
* Strategy to consider number of pending pods
* Integration with cluster autoscaler
* Integration with metrics providers for obtaining real load metrics
* Consideration of Kubernetes's scheduler's predicates


## Compatibility matrix

Descheduler | supported Kubernetes version
-------------|-----------------------------
0.4+ | 1.9+
0.1-0.3 | 1.7-1.8

## Community, discussion, contribution, and support

Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).

You can reach the maintainers of this project at:

- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)

### Code of conduct

Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).

5 docs/README.md (Normal file)
@@ -0,0 +1,5 @@
# Documentation Index

- [Contributor Guide](contributor-guide.md)
- [Release Guide](release-guide.md)
- [User Guide](user-guide.md)

42 docs/contributor-guide.md (Normal file)
@@ -0,0 +1,42 @@
# Contributor Guide

## Required Tools

- [Git](https://git-scm.com/downloads)
- [Go 1.13+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind](https://kind.sigs.k8s.io/)

## Build and Run

Build descheduler.
```sh
cd $GOPATH/src/sigs.k8s.io
git clone https://github.com/kubernetes-sigs/descheduler.git
cd descheduler
make
```

Run descheduler.
```sh
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
```

View all CLI options.
```
./_output/bin/descheduler --help
```

## Run Tests
```
GOOS=linux make dev-image
kind create cluster --config hack/kind_config.yaml
kind load docker-image <image name>
kind get kubeconfig > /tmp/admin.conf
make test-unit
make test-e2e
```

### Miscellaneous
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.

@@ -1,4 +1,4 @@
# Release process
# Release Guide

## Semi-automatic

@@ -19,3 +19,26 @@
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

## Notes
See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.

List images in staging registry.
```
gcloud container images list --repository gcr.io/k8s-staging-descheduler
```

List descheduler image tags in the staging registry.
```
gcloud container images list-tags gcr.io/k8s-staging-descheduler/descheduler
```

Get SHA256 hash for a specific image in the staging registry.
```
gcloud container images describe gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```

Pull image from the staging registry.
```
docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```

90 docs/user-guide.md (Normal file)
@@ -0,0 +1,90 @@
# User Guide

Starting with descheduler release v0.10.0 container images are available in these container registries.
* `asia.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
* `eu.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
* `us.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
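
As an added sketch (not part of the upstream change), referencing one of these images in a pod spec looks like the fragment below; the `v0.10.0` tag matches the job and cronjob manifests elsewhere in this compare:

```yaml
containers:
- name: descheduler
  image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0
```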

## Policy Configuration Examples
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.

## CLI Options
The descheduler has many CLI options that can be used to override its default behavior.
```
descheduler --help
The descheduler evicts pods which may be bound to less desired nodes

Usage:
  descheduler [flags]
  descheduler [command]

Available Commands:
  help        Help about any command
  version     Version of descheduler

Flags:
      --add-dir-header                   If true, adds the file directory to the header
      --alsologtostderr                  log to standard error as well as files
      --descheduling-interval duration   Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
      --dry-run                          execute descheduler in dry run mode.
      --evict-local-storage-pods         Enables evicting pods using local storage by descheduler
  -h, --help                             help for descheduler
      --kubeconfig string                File with kube configuration.
      --log-backtrace-at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
      --log-dir string                   If non-empty, write log files in this directory
      --log-file string                  If non-empty, use this log file
      --log-file-max-size uint           Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
      --log-flush-frequency duration     Maximum number of seconds between log flushes (default 5s)
      --logtostderr                      log to standard error instead of files (default true)
      --max-pods-to-evict-per-node int   Limits the maximum number of pods to be evicted per node by descheduler
      --node-selector string             Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
      --policy-config-file string        File with descheduler policy configuration.
      --skip-headers                     If true, avoid header prefixes in the log messages
      --skip-log-headers                 If true, avoid headers when opening log files
      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
  -v, --v Level                          number for the log level verbosity
      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging

Use "descheduler [command] --help" for more information about a command.
```

## Production Use Cases
This section contains descriptions of real world production use cases.

### Balance Cluster By Pod Age
When initially migrating applications from a static virtual machine infrastructure to a cloud native k8s
infrastructure there can be a tendency to treat application pods like static virtual machines. One approach
to help prevent developers and operators from treating pods like virtual machines is to ensure that pods
only run for a fixed amount
of time.

The `PodLifeTime` strategy can be used to ensure that old pods are evicted. It is recommended to create a
[pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) for each
application to ensure application availability.
```
descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.yml
```

This policy configuration file ensures that pods created more than 7 days ago are evicted.
```
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: false
  "RemoveDuplicates":
    enabled: false
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: false
  "RemovePodsViolatingNodeAffinity":
    enabled: false
  "RemovePodsViolatingNodeTaints":
    enabled: false
  "RemovePodsHavingTooManyRestarts":
    enabled: false
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```

20 examples/pod-life-time.yml (Normal file)
@@ -0,0 +1,20 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: false
  "RemoveDuplicates":
    enabled: false
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: false
  "RemovePodsViolatingNodeAffinity":
    enabled: false
  "RemovePodsViolatingNodeTaints":
    enabled: false
  "RemovePodsHavingTooManyRestarts":
    enabled: false
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 604800 # 7 days

@@ -17,3 +17,9 @@ strategies:
        "cpu" : 50
        "memory": 50
        "pods": 50
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThresholds: 100
        includingInitContainers: true

12 go.mod
@@ -3,12 +3,14 @@ module sigs.k8s.io/descheduler
go 1.13

require (
	github.com/gogo/protobuf v1.3.1 // indirect
	github.com/spf13/cobra v0.0.5
	github.com/spf13/pflag v1.0.5
	k8s.io/api v0.17.0
	k8s.io/apimachinery v0.17.3-beta.0
	k8s.io/apiserver v0.17.0
	k8s.io/client-go v0.17.0
	k8s.io/component-base v0.17.0
	k8s.io/api v0.17.5
	k8s.io/apimachinery v0.17.5
	k8s.io/apiserver v0.17.5
	k8s.io/client-go v0.17.5
	k8s.io/component-base v0.17.5
	k8s.io/klog v1.0.0
	sigs.k8s.io/yaml v1.2.0 // indirect
)

46 go.sum
@@ -42,7 +42,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
|
||||
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -81,6 +80,8 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
@@ -96,7 +97,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
@@ -126,7 +126,6 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
||||
@@ -158,7 +157,6 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@@ -178,7 +176,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
@@ -210,7 +207,6 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
@@ -231,8 +227,8 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
@@ -323,33 +319,33 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
|
||||
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
|
||||
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apimachinery v0.17.3-beta.0 h1:DeN0royOQ5+j3ytFWnDkxGiF5r9T7m6E9Ukzjg4vVHc=
|
||||
k8s.io/apimachinery v0.17.3-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
|
||||
k8s.io/apiserver v0.17.0 h1:XhUix+FKFDcBygWkQNp7wKKvZL030QUlH1o8vFeSgZA=
|
||||
k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
|
||||
k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
|
||||
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
|
||||
k8s.io/component-base v0.17.0 h1:BnDFcmBDq+RPpxXjmuYnZXb59XNN9CaFrX8ba9+3xrA=
|
||||
k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
|
||||
k8s.io/api v0.17.5 h1:EkVieIbn1sC8YCDwckLKLpf+LoVofXYW72+LTZWo4aQ=
|
||||
k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
|
||||
k8s.io/apimachinery v0.17.5 h1:QAjfgeTtSGksdkgyaPrIb4lhU16FWMIzxKejYD5S0gc=
|
||||
k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
|
||||
k8s.io/apiserver v0.17.5 h1:8RJUTT2TzVZnCl3B+6YgYtaMTIcjwo11z40yZbA85ds=
|
||||
k8s.io/apiserver v0.17.5/go.mod h1:yo2cFZJ7AUj6BYYRWzEzs2cLtkY6F6zdxs8GhLu5V28=
|
||||
k8s.io/client-go v0.17.5 h1:Sm/9AQ415xPAX42JLKbJZnreXFgD2rVfDUDwOTm0gzA=
|
||||
k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
|
||||
k8s.io/component-base v0.17.5 h1:f4QKFRH1OIuWbpWwDm+vGvFQmrXmAbtPF8PREdtkIGE=
|
||||
k8s.io/component-base v0.17.5/go.mod h1:cZQAW1AUbBjD1lh+e/krbiIpqGz6fipI+vHslOBbuHE=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d h1:jocF7XFucw2pEiv2wS7wk2FRFCjDFGV1oa4TMs0SAT0=
|
||||
k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
||||
21 hack/update-vendor.sh (Executable file)
@@ -0,0 +1,21 @@
#!/bin/bash

# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

go mod tidy
go mod vendor

88 hack/verify-vendor.sh (Executable file)
@@ -0,0 +1,88 @@
#!/bin/bash

# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is mostly copied from the hack/verify-vendor.sh script located in k8s.io/kubernetes

set -o errexit
set -o nounset
set -o pipefail

source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

mkdir -p "${DESCHEDULER_ROOT}/_tmp"
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-vendor.XXXXXX")"

if [[ -z ${KEEP_TMP:-} ]]; then
  KEEP_TMP=false
fi

function cleanup {
  # make go module dirs writeable
  chmod -R +w "${_tmpdir}"
  if [ "${KEEP_TMP}" == "true" ]; then
    echo "Leaving ${_tmpdir} for you to examine or copy. Please delete it manually when finished. (rm -rf ${_tmpdir})"
  else
    echo "Removing ${_tmpdir}"
    rm -rf "${_tmpdir}"
  fi
}
trap "cleanup" EXIT

_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"

git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"

pushd "${_deschedulertmp}" > /dev/null 2>&1
# Destroy deps in the copy of the kube tree
rm -rf ./vendor

# Recreate the vendor tree using the nice clean set we just downloaded
hack/update-vendor.sh
popd > /dev/null 2>&1

ret=0

pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
# Test for diffs
if ! _out="$(diff -Naupr --ignore-matching-lines='^\s*\"GoVersion\":' go.mod "${_deschedulertmp}/go.mod")"; then
  echo "Your go.mod file is different:" >&2
  echo "${_out}" >&2
  echo "Vendor Verify failed." >&2
  echo "If you're seeing this locally, run the below command to fix your go.mod:" >&2
  echo "hack/update-vendor.sh" >&2
  ret=1
fi

if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
  echo "Your vendored results are different:" >&2
  echo "${_out}" >&2
  echo "Vendor Verify failed." >&2
  echo "${_out}" > vendordiff.patch
  echo "If you're seeing this locally, run the below command to fix your directories:" >&2
  echo "hack/update-vendor.sh" >&2
  ret=1
fi
popd > /dev/null 2>&1

if [[ ${ret} -gt 0 ]]; then
  exit ${ret}
fi

echo "Vendor Verified."

@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: descheduler
        image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume

@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: descheduler
        image: us.gcr.io/k8s-artifacts-prod/descheduler:v0.10.0
        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
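
As an added sketch (not part of the upstream change), the fragments above sit inside a Job of roughly this shape; every field not shown in the hunks (names, namespace, service account, command, volume source) is an assumption for illustration:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job                    # hypothetical name
  namespace: kube-system                   # the README runs the descheduler in kube-system
spec:
  template:
    spec:
      priorityClassName: system-cluster-critical
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa   # hypothetical, bound via kubernetes/rbac.yaml
      containers:
      - name: descheduler
        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0
        # assumed invocation; the /bin/descheduler path comes from the Dockerfile above
        command: ["/bin/descheduler", "--policy-config-file", "/policy-dir/policy.yaml"]
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap   # hypothetical ConfigMap name
```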

@@ -17,7 +17,7 @@ limitations under the License.
package api

import (
	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -46,8 +46,10 @@ type DeschedulerStrategy struct {

// Only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
	NodeAffinityType []string
	PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
	MaxPodLifeTimeSeconds *uint
}

type Percentage float64
@@ -58,3 +60,8 @@ type NodeResourceUtilizationThresholds struct {
	TargetThresholds ResourceThresholds
	NumberOfNodes int
}

type PodsHavingTooManyRestarts struct {
	PodRestartThreshold int32
	IncludingInitContainers bool
}

@@ -17,7 +17,7 @@ limitations under the License.
package v1alpha1

import (
	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -46,8 +46,10 @@ type DeschedulerStrategy struct {

// Only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
	PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
	MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
}

type Percentage float64
@@ -58,3 +60,8 @@ type NodeResourceUtilizationThresholds struct {
	TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
	NumberOfNodes int `json:"numberOfNodes,omitempty"`
}

type PodsHavingTooManyRestarts struct {
	PodRestartThreshold int32 `json:"podRestartThreshold,omitempty"`
	IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}

@@ -65,6 +65,16 @@ func RegisterConversions(s *runtime.Scheme) error {
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*api.PodsHavingTooManyRestarts)(nil), (*PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(a.(*api.PodsHavingTooManyRestarts), b.(*PodsHavingTooManyRestarts), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
	}); err != nil {
@@ -150,11 +160,33 @@ func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtili
	return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
}

func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
	out.PodRestartThreshold = in.PodRestartThreshold
	out.IncludingInitContainers = in.IncludingInitContainers
	return nil
}

// Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
	return autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in, out, s)
}

func autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
	out.PodRestartThreshold = in.PodRestartThreshold
	out.IncludingInitContainers = in.IncludingInitContainers
	return nil
}

// Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
	return autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in, out, s)
}

func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
	if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
		return err
	}
	out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
	out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	return nil
}

@@ -164,10 +196,10 @@ func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyP
}

func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
	if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
		return err
	}
	out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
	out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
	out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
	out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
	return nil
}

@@ -103,6 +103,22 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsHavingTooManyRestarts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
@@ -150,12 +166,26 @@ func (in StrategyList) DeepCopy() StrategyList {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||
if in.NodeResourceUtilizationThresholds != nil {
|
||||
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
|
||||
*out = new(NodeResourceUtilizationThresholds)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodsHavingTooManyRestarts != nil {
|
||||
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
|
||||
*out = new(PodsHavingTooManyRestarts)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxPodLifeTimeSeconds != nil {
|
||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -103,6 +103,22 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsHavingTooManyRestarts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
@@ -150,12 +166,26 @@ func (in StrategyList) DeepCopy() StrategyList {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||
if in.NodeResourceUtilizationThresholds != nil {
|
||||
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
|
||||
*out = new(NodeResourceUtilizationThresholds)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodsHavingTooManyRestarts != nil {
|
||||
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
|
||||
*out = new(PodsHavingTooManyRestarts)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxPodLifeTimeSeconds != nil {
|
||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -19,16 +19,19 @@ package descheduler
import (
"fmt"

"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/utils"
)

func Run(rs *options.DeschedulerServer) error {
@@ -46,33 +49,61 @@ func Run(rs *options.DeschedulerServer) error {
return fmt.Errorf("deschedulerPolicy is nil")
}

return RunDeschedulerStrategies(rs, deschedulerPolicy)
}

func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy) error {
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
return err
}

stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(rs.Client, rs.NodeSelector, stopChannel)
if err != nil {
return err
return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
}

type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)

func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()

sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)

strategyFuncs := map[string]strategyFunction{
"RemoveDuplicates": strategies.RemoveDuplicatePods,
"LowNodeUtilization": strategies.LowNodeUtilization,
"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
"PodLifeTime": strategies.PodLifeTime,
}

if len(nodes) <= 1 {
klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return nil
}

nodePodCount := utils.InitializeNodePodCount(nodes)
wait.Until(func() {
strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
if err != nil {
klog.V(1).Infof("Unable to get ready nodes: %v", err)
close(stopChannel)
return
}

if len(nodes) <= 1 {
klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
close(stopChannel)
return
}

podEvictor := evictions.NewPodEvictor(
rs.Client,
evictionPolicyGroupVersion,
rs.DryRun,
rs.MaxNoOfPodsToEvictPerNode,
nodes,
)

for name, f := range strategyFuncs {
if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
}
}

// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
if rs.DeschedulingInterval.Seconds() == 0 {
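The strategyFunction type and the strategyFuncs map above define the contract a strategy must satisfy to be dispatched by RunDeschedulerStrategies. The following is a minimal sketch of a conforming strategy; the name RemoveNothing and its map key are hypothetical and are not part of this change.

// Hypothetical example only; not part of this diff.
package strategies

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// RemoveNothing matches the strategyFunction signature and could be registered
// with strategyFuncs["RemoveNothing"] = strategies.RemoveNothing.
func RemoveNothing(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
	if !strategy.Enabled {
		return
	}
	klog.V(1).Infof("RemoveNothing: inspected %d nodes, evicted %d pods", len(nodes), podEvictor.TotalEvicted())
}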
pkg/descheduler/descheduler_test.go (new file, 92 lines)
@@ -0,0 +1,92 @@
|
||||
package descheduler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestTaintsUpdated(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
|
||||
p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
|
||||
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
|
||||
{},
|
||||
}
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
dp := &api.DeschedulerPolicy{
|
||||
Strategies: api.StrategyList{
|
||||
"RemovePodsViolatingNodeTaints": api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
stopChannel := make(chan struct{})
|
||||
defer close(stopChannel)
|
||||
|
||||
rs := options.NewDeschedulerServer()
|
||||
rs.Client = client
|
||||
rs.DeschedulingInterval = 100 * time.Millisecond
|
||||
go func() {
|
||||
err := RunDeschedulerStrategies(rs, dp, "v1beta1", stopChannel)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for few cycles and then verify the only pod still exists
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Unable to list pods: %v", err)
|
||||
}
|
||||
if len(pods.Items) < 1 {
|
||||
t.Errorf("The pod was evicted before a node was tained")
|
||||
}
|
||||
|
||||
n1WithTaint := n1.DeepCopy()
|
||||
n1WithTaint.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := client.CoreV1().Nodes().Update(n1WithTaint); err != nil {
|
||||
t.Fatalf("Unable to update node: %v\n", err)
|
||||
}
|
||||
|
||||
if err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
|
||||
// Get over evicted pod result in panic
|
||||
//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
|
||||
// List is better, it does not panic.
|
||||
// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
|
||||
if err == nil {
|
||||
if len(pods.Items) > 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies")
|
||||
}
|
||||
}
|
||||
@@ -32,6 +32,72 @@ import (
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
)

// nodePodEvictedCount keeps count of pods evicted on node
type nodePodEvictedCount map[*v1.Node]int

type PodEvictor struct {
client clientset.Interface
policyGroupVersion string
dryRun bool
maxPodsToEvict int
nodepodCount nodePodEvictedCount
}

func NewPodEvictor(
client clientset.Interface,
policyGroupVersion string,
dryRun bool,
maxPodsToEvict int,
nodes []*v1.Node,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
for _, node := range nodes {
// Initialize podsEvicted till now with 0.
nodePodCount[node] = 0
}

return &PodEvictor{
client: client,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvict: maxPodsToEvict,
nodepodCount: nodePodCount,
}
}

// NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
return pe.nodepodCount[node]
}

// TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() int {
var total int
for _, count := range pe.nodepodCount {
total += count
}
return total
}

// EvictPod returns non-nil error only when evicting a pod on a node is not
// possible (due to maxPodsToEvict constraint). Success is true when the pod
// is evicted on the server side.
func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err error) {
if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
}

success, err = EvictPod(pe.client, pod, pe.policyGroupVersion, pe.dryRun)
if success {
pe.nodepodCount[node]++
klog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
return success, nil
}
// err is used only for logging purposes
klog.Errorf("Error when evicting pod: %#v (%#v)", pod.Name, err)
return false, nil
}
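For orientation, this is how a strategy is expected to consume the PodEvictor introduced above: evict pod by pod, stop on a node once EvictPod returns an error (the per-node maxPodsToEvict cap), and read the running totals afterwards. The helper below is a minimal sketch; evictCandidates is a hypothetical name and the pod list is assumed to have been selected by a strategy already.

// Minimal usage sketch; evictCandidates is hypothetical and not part of this diff.
func evictCandidates(pods []*v1.Pod, node *v1.Node, podEvictor *PodEvictor) {
	for _, pod := range pods {
		success, err := podEvictor.EvictPod(pod, node)
		if err != nil {
			// The per-node eviction limit was reached; stop evicting on this node.
			break
		}
		if success {
			klog.V(3).Infof("Evicted pod %q from node %q (total evicted so far: %d)", pod.Name, node.Name, podEvictor.TotalEvicted())
		}
	}
}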
|
||||
|
||||
func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
|
||||
if dryRun {
|
||||
return true, nil
|
||||
@@ -58,11 +124,12 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
|
||||
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
|
||||
r.Event(pod, v1.EventTypeNormal, "Descheduled", "pod evicted by sigs.k8s.io/descheduler")
|
||||
return true, nil
|
||||
} else if apierrors.IsTooManyRequests(err) {
|
||||
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
} else if apierrors.IsNotFound(err) {
|
||||
return true, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
if apierrors.IsTooManyRequests(err) {
|
||||
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -17,17 +17,18 @@ limitations under the License.
|
||||
package evictions
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEvictPod(t *testing.T) {
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||
pod1 := test.BuildTestPod("p1", 400, 0, "node1")
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
|
||||
tests := []struct {
|
||||
description string
|
||||
node *v1.Node
|
||||
@@ -46,7 +47,7 @@ func TestEvictPod(t *testing.T) {
|
||||
description: "test pod eviction - pod absent",
|
||||
node: node1,
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1"), *test.BuildTestPod("p3", 450, 0, "node1")},
|
||||
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -17,34 +17,27 @@ limitations under the License.
|
||||
package node
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
// ReadyNodes returns ready nodes irrespective of whether they are
// schedulable or not.
func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
ns, err := labels.Parse(nodeSelector)
if err != nil {
return []*v1.Node{}, err
}

var nodes []*v1.Node
nl := GetNodeLister(client, stopChannel)
if nl != nil {
// err is defined above
if nodes, err = nl.List(ns); err != nil {
return []*v1.Node{}, err
}
// err is defined above
if nodes, err = nodeInformer.Lister().List(ns); err != nil {
return []*v1.Node{}, err
}

if len(nodes) == 0 {
@@ -74,22 +67,6 @@ func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-c
return readyNodes, nil
}
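The informer-backed ReadyNodes above expects a started node informer. The following is a caller-side wiring sketch, assuming the same informers and nodeutil imports that descheduler.go uses; listComputeNodes and the "type=compute" selector are only illustrative.

func listComputeNodes(client clientset.Interface, stopCh chan struct{}) ([]*v1.Node, error) {
	factory := informers.NewSharedInformerFactory(client, 0)
	nodeInformer := factory.Core().V1().Nodes()
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	// An empty selector string would list all ready nodes.
	return nodeutil.ReadyNodes(client, nodeInformer, "type=compute", stopCh)
}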
|
||||
|
||||
func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) corelisters.NodeLister {
|
||||
if stopChannel == nil {
|
||||
return nil
|
||||
}
|
||||
listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
|
||||
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
|
||||
nodeLister := corelisters.NewNodeLister(store)
|
||||
reflector := cache.NewReflector(listWatcher, &v1.Node{}, store, time.Hour)
|
||||
go reflector.Run(stopChannel)
|
||||
|
||||
// To give some time so that listing works, chosen randomly
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
return nodeLister
|
||||
}
|
||||
|
||||
// IsReady checks if the descheduler could run against given node.
|
||||
func IsReady(node *v1.Node) bool {
|
||||
for i := range node.Status.Conditions {
|
||||
@@ -133,12 +110,9 @@ func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
if err != nil || !ok {
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
if !IsNodeUnschedulable(node) {
|
||||
klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
if !IsNodeUnschedulable(node) {
|
||||
klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
|
||||
@@ -21,19 +21,20 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestReadyNodes(t *testing.T) {
|
||||
node1 := test.BuildTestNode("node2", 1000, 2000, 9)
|
||||
node2 := test.BuildTestNode("node3", 1000, 2000, 9)
|
||||
node1 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
|
||||
node2 := test.BuildTestNode("node3", 1000, 2000, 9, nil)
|
||||
node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}}
|
||||
node3 := test.BuildTestNode("node4", 1000, 2000, 9)
|
||||
node3 := test.BuildTestNode("node4", 1000, 2000, 9, nil)
|
||||
node3.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}}
|
||||
node4 := test.BuildTestNode("node5", 1000, 2000, 9)
|
||||
node4 := test.BuildTestNode("node5", 1000, 2000, 9, nil)
|
||||
node4.Spec.Unschedulable = true
|
||||
node5 := test.BuildTestNode("node6", 1000, 2000, 9)
|
||||
node5 := test.BuildTestNode("node6", 1000, 2000, 9, nil)
|
||||
node5.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
|
||||
|
||||
if !IsReady(node1) {
|
||||
@@ -55,14 +56,23 @@ func TestReadyNodes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReadyNodesWithNodeSelector(t *testing.T) {
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
node1.Labels = map[string]string{"type": "compute"}
|
||||
node2 := test.BuildTestNode("node2", 1000, 2000, 9)
|
||||
node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
|
||||
node2.Labels = map[string]string{"type": "infra"}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(node1, node2)
|
||||
nodeSelector := "type=compute"
|
||||
nodes, _ := ReadyNodes(fakeClient, nodeSelector, nil)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
|
||||
|
||||
stopChannel := make(chan struct{}, 0)
|
||||
sharedInformerFactory.Start(stopChannel)
|
||||
sharedInformerFactory.WaitForCacheSync(stopChannel)
|
||||
defer close(stopChannel)
|
||||
|
||||
nodes, _ := ReadyNodes(fakeClient, nodeInformer, nodeSelector, nil)
|
||||
|
||||
if nodes[0].Name != "node1" {
|
||||
t.Errorf("Expected node1, got %s", nodes[0].Name)
|
||||
@@ -242,6 +252,49 @@ func TestPodFitsAnyNode(t *testing.T) {
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit one of the nodes (error node first)",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit none of the nodes",
|
||||
pod: &v1.Pod{
|
||||
|
||||
@@ -26,7 +26,7 @@ import (
|
||||
)
|
||||
|
||||
func TestIsEvictable(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13)
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
type testCase struct {
|
||||
pod *v1.Pod
|
||||
runBefore func(*v1.Pod)
|
||||
@@ -36,14 +36,14 @@ func TestIsEvictable(t *testing.T) {
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -51,14 +51,14 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
@@ -66,7 +66,7 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
@@ -83,7 +83,7 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
@@ -100,7 +100,7 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: true,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -118,14 +118,14 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
@@ -133,14 +133,14 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
@@ -148,7 +148,7 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: true,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -156,7 +156,7 @@ func TestIsEvictable(t *testing.T) {
|
||||
evictLocalStoragePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name),
|
||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -179,15 +179,15 @@ func TestIsEvictable(t *testing.T) {
|
||||
}
|
||||
}
|
||||
func TestPodTypes(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9)
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
|
||||
// These won't be evicted.
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name, nil)
|
||||
|
||||
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
|
||||
@@ -23,67 +23,49 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
//type creator string
|
||||
type DuplicatePodsMap map[string][]*v1.Pod
|
||||
|
||||
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
|
||||
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
|
||||
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
|
||||
if !strategy.Enabled {
|
||||
return
|
||||
}
|
||||
deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||
}
|
||||
|
||||
// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
|
||||
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||
podsEvicted := 0
|
||||
func RemoveDuplicatePods(
|
||||
client clientset.Interface,
|
||||
strategy api.DeschedulerStrategy,
|
||||
nodes []*v1.Node,
|
||||
evictLocalStoragePods bool,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
) {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %#v", node.Name)
|
||||
dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
|
||||
dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
|
||||
for creator, pods := range dpm {
|
||||
if len(pods) > 1 {
|
||||
klog.V(1).Infof("%#v", creator)
|
||||
// i = 0 does not evict the first pod
|
||||
for i := 1; i < len(pods); i++ {
|
||||
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
|
||||
if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
|
||||
break
|
||||
}
|
||||
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
|
||||
if !success {
|
||||
klog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
|
||||
} else {
|
||||
nodepodCount[node]++
|
||||
klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
podsEvicted += nodepodCount[node]
|
||||
}
|
||||
return podsEvicted
|
||||
}
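A minimal invocation sketch for the reworked RemoveDuplicatePods, mirroring the updated test further below; runRemoveDuplicates is hypothetical, and the "v1" policy group version and per-node cap of 5 are placeholder values (real callers obtain them from SupportEviction and the server options).

// Hypothetical helper, assumed to live in the strategies package.
func runRemoveDuplicates(client clientset.Interface, nodes []*v1.Node) {
	podEvictor := evictions.NewPodEvictor(client, "v1", false, 5, nodes)
	RemoveDuplicatePods(client, api.DeschedulerStrategy{}, nodes, false, podEvictor)
	klog.V(1).Infof("RemoveDuplicates evicted %d pods in total", podEvictor.TotalEvicted())
}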
|
||||
|
||||
// ListDuplicatePodsOnANode lists duplicate pods on a given node.
|
||||
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
|
||||
//type creator string
|
||||
type duplicatePodsMap map[string][]*v1.Pod
|
||||
|
||||
// listDuplicatePodsOnANode lists duplicate pods on a given node.
|
||||
func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
|
||||
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return FindDuplicatePods(pods)
|
||||
}
|
||||
|
||||
// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
|
||||
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
|
||||
dpm := DuplicatePodsMap{}
|
||||
dpm := duplicatePodsMap{}
|
||||
// Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error.
|
||||
for _, pod := range pods {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
|
||||
@@ -24,29 +24,31 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestFindDuplicatePods(t *testing.T) {
|
||||
// first setup pods
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
|
||||
p1.Namespace = "dev"
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
||||
p2.Namespace = "dev"
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
||||
p3.Namespace = "dev"
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
|
||||
p7.Namespace = "kube-system"
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node.Name)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
|
||||
p8.Namespace = "test"
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node.Name)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node.Name, nil)
|
||||
p9.Namespace = "test"
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node.Name)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node.Name, nil)
|
||||
p10.Namespace = "test"
|
||||
|
||||
// ### Evictable Pods ###
|
||||
@@ -127,9 +129,6 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
|
||||
npe := utils.NodePodEvictedCount{}
|
||||
npe[node] = 0
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: testCase.pods}, nil
|
||||
@@ -137,7 +136,16 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
})
|
||||
podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, testCase.maxPodsToEvict, false)
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
testCase.maxPodsToEvict,
|
||||
[]*v1.Node{node},
|
||||
)
|
||||
|
||||
RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != testCase.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
|
||||
}
|
||||
|
||||
@@ -19,11 +19,11 @@ package strategies
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
@@ -32,23 +32,23 @@ import (
|
||||
)
|
||||
|
||||
type NodeUsageMap struct {
|
||||
node *v1.Node
|
||||
usage api.ResourceThresholds
|
||||
allPods []*v1.Pod
|
||||
nonRemovablePods []*v1.Pod
|
||||
bePods []*v1.Pod
|
||||
bPods []*v1.Pod
|
||||
gPods []*v1.Pod
|
||||
node *v1.Node
|
||||
usage api.ResourceThresholds
|
||||
allPods []*v1.Pod
|
||||
}
|
||||
|
||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||
|
||||
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount) {
|
||||
func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
if !strategy.Enabled {
|
||||
return
|
||||
}
|
||||
// todo: move to config validation?
|
||||
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
|
||||
if strategy.Params.NodeResourceUtilizationThresholds == nil {
|
||||
klog.V(1).Infof("NodeResourceUtilizationThresholds not set")
|
||||
return
|
||||
}
|
||||
|
||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||
if !validateThresholds(thresholds) {
|
||||
@@ -59,8 +59,8 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
|
||||
return
|
||||
}
|
||||
|
||||
npm := createNodePodsMap(ds.Client, nodes)
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
|
||||
npm := createNodePodsMap(client, nodes)
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)
|
||||
|
||||
klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
|
||||
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
|
||||
@@ -90,9 +90,14 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
|
||||
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
|
||||
klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
|
||||
|
||||
totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
|
||||
klog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
|
||||
evictPodsFromTargetNodes(
|
||||
targetNodes,
|
||||
lowNodes,
|
||||
targetThresholds,
|
||||
evictLocalStoragePods,
|
||||
podEvictor)
|
||||
|
||||
klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())
|
||||
}
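LowNodeUtilization refuses to run unless strategy.Params.NodeResourceUtilizationThresholds is set. A configuration sketch in Go follows; exampleLowNodeUtilizationStrategy is hypothetical, the TargetThresholds field name is assumed from the project's api package, and the 30/50 percentages simply match the test further below.

// Hypothetical configuration sketch, not part of this diff.
func exampleLowNodeUtilizationStrategy() api.DeschedulerStrategy {
	return api.DeschedulerStrategy{
		Enabled: true,
		Params: api.StrategyParameters{
			NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
				Thresholds: api.ResourceThresholds{
					v1.ResourceCPU:  30,
					v1.ResourcePods: 30,
				},
				TargetThresholds: api.ResourceThresholds{
					v1.ResourceCPU:  50,
					v1.ResourcePods: 50,
				},
			},
		},
	}
}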
|
||||
|
||||
func validateThresholds(thresholds api.ResourceThresholds) bool {
|
||||
@@ -133,9 +138,12 @@ func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
|
||||
func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
|
||||
lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
|
||||
for node, pods := range npm {
|
||||
usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods, evictLocalStoragePods)
|
||||
nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}
|
||||
|
||||
usage := nodeUtilization(node, pods, evictLocalStoragePods)
|
||||
nuMap := NodeUsageMap{
|
||||
node: node,
|
||||
usage: usage,
|
||||
allPods: pods,
|
||||
}
|
||||
// Check if node is underutilized and if we can schedule pods on it.
|
||||
if !nodeutil.IsNodeUnschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
|
||||
klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
|
||||
@@ -146,7 +154,6 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
|
||||
} else {
|
||||
klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
|
||||
}
|
||||
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
|
||||
}
|
||||
return lowNodes, targetNodes
|
||||
}
|
||||
@@ -154,14 +161,20 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
|
||||
// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
|
||||
// evicts them based on QoS as fallback option.
|
||||
// TODO: @ravig Break this function into smaller functions.
|
||||
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount utils.NodePodEvictedCount) int {
|
||||
podsEvicted := 0
|
||||
func evictPodsFromTargetNodes(
|
||||
targetNodes, lowNodes []NodeUsageMap,
|
||||
targetThresholds api.ResourceThresholds,
|
||||
evictLocalStoragePods bool,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
) {
|
||||
|
||||
SortNodesByUsage(targetNodes)
|
||||
|
||||
// upper bound on total number of pods/cpu/memory to be moved
|
||||
var totalPods, totalCPU, totalMem float64
|
||||
var taintsOfLowNodes = make(map[string][]v1.Taint, len(lowNodes))
|
||||
for _, node := range lowNodes {
|
||||
taintsOfLowNodes[node.node.Name] = node.node.Spec.Taints
|
||||
nodeCapacity := node.node.Status.Capacity
|
||||
if len(node.node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.node.Status.Allocatable
|
||||
@@ -192,62 +205,64 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
|
||||
nodeCapacity = node.node.Status.Allocatable
|
||||
}
|
||||
klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
|
||||
currentPodsEvicted := nodepodCount[node.node]
|
||||
|
||||
nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods := classifyPods(node.allPods, evictLocalStoragePods)
|
||||
klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bestEffortPods:%v, burstablePods:%v, guaranteedPods:%v", len(node.allPods), len(nonRemovablePods), len(bestEffortPods), len(burstablePods), len(guaranteedPods))
|
||||
|
||||
// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
|
||||
if node.allPods[0].Spec.Priority != nil {
|
||||
klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
|
||||
evictablePods := make([]*v1.Pod, 0)
|
||||
evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
|
||||
evictablePods = append(append(burstablePods, bestEffortPods...), guaranteedPods...)
|
||||
|
||||
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
sortPodsBasedOnPriority(evictablePods)
|
||||
evictPods(evictablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
evictPods(evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
} else {
|
||||
// TODO: Remove this when we support only priority.
|
||||
// Falling back to evicting pods based on priority.
|
||||
klog.V(1).Infof("Evicting pods based on QoS")
|
||||
klog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
|
||||
klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods))
|
||||
// evict best effort pods
|
||||
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
evictPods(bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
// evict burstable pods
|
||||
evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
evictPods(burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
// evict guaranteed pods
|
||||
evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, ¤tPodsEvicted, dryRun, maxPodsToEvict)
|
||||
evictPods(guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
|
||||
}
|
||||
nodepodCount[node.node] = currentPodsEvicted
|
||||
podsEvicted = podsEvicted + nodepodCount[node.node]
|
||||
klog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
|
||||
klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
|
||||
}
|
||||
return podsEvicted
|
||||
}
|
||||
|
||||
func evictPods(inputPods []*v1.Pod,
|
||||
client clientset.Interface,
|
||||
evictionPolicyGroupVersion string,
|
||||
targetThresholds api.ResourceThresholds,
|
||||
nodeCapacity v1.ResourceList,
|
||||
nodeUsage api.ResourceThresholds,
|
||||
totalPods *float64,
|
||||
totalCPU *float64,
|
||||
totalMem *float64,
|
||||
podsEvicted *int,
|
||||
dryRun bool, maxPodsToEvict int) {
|
||||
taintsOfLowNodes map[string][]v1.Taint,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
node *v1.Node) {
|
||||
if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCPU > 0 || *totalMem > 0) {
|
||||
onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||
for _, pod := range inputPods {
|
||||
if maxPodsToEvict > 0 && *podsEvicted+1 > maxPodsToEvict {
|
||||
break
|
||||
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
|
||||
klog.V(3).Infof("Skipping eviction for Pod: %#v, doesn't tolerate node taint", pod.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
|
||||
mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
|
||||
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
|
||||
if !success {
|
||||
klog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
|
||||
} else {
|
||||
klog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
|
||||
|
||||
success, err := podEvictor.EvictPod(pod, node)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if success {
|
||||
klog.V(3).Infof("Evicted pod: %#v", pod.Name)
|
||||
// update remaining pods
|
||||
*podsEvicted++
|
||||
nodeUsage[v1.ResourcePods] -= onePodPercentage
|
||||
*totalPods--
|
||||
|
||||
@@ -349,38 +364,18 @@ func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeUtilization returns the current usage of node.
|
||||
func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
|
||||
bePods := []*v1.Pod{}
|
||||
nonRemovablePods := []*v1.Pod{}
|
||||
bPods := []*v1.Pod{}
|
||||
gPods := []*v1.Pod{}
|
||||
totalReqs := map[v1.ResourceName]resource.Quantity{}
|
||||
func nodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) api.ResourceThresholds {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: {},
|
||||
v1.ResourceMemory: {},
|
||||
}
|
||||
for _, pod := range pods {
|
||||
// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
|
||||
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
|
||||
nonRemovablePods = append(nonRemovablePods, pod)
|
||||
if podutil.IsBestEffortPod(pod) {
|
||||
continue
|
||||
}
|
||||
} else if podutil.IsBestEffortPod(pod) {
|
||||
bePods = append(bePods, pod)
|
||||
continue
|
||||
} else if podutil.IsBurstablePod(pod) {
|
||||
bPods = append(bPods, pod)
|
||||
} else {
|
||||
gPods = append(gPods, pod)
|
||||
}
|
||||
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
for name, quantity := range req {
|
||||
if name == v1.ResourceCPU || name == v1.ResourceMemory {
|
||||
if value, ok := totalReqs[name]; !ok {
|
||||
totalReqs[name] = quantity.DeepCopy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
totalReqs[name] = value
|
||||
}
|
||||
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
|
||||
// the format of the quantity will be updated to the format of y.
|
||||
totalReqs[name].Add(quantity)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -390,12 +385,42 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool)
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
}
|
||||
|
||||
usage := api.ResourceThresholds{}
|
||||
totalCPUReq := totalReqs[v1.ResourceCPU]
|
||||
totalMemReq := totalReqs[v1.ResourceMemory]
|
||||
totalPods := len(pods)
|
||||
usage[v1.ResourceCPU] = api.Percentage((float64(totalCPUReq.MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue()))
|
||||
usage[v1.ResourceMemory] = api.Percentage(float64(totalMemReq.Value()) / float64(nodeCapacity.Memory().Value()) * 100)
|
||||
usage[v1.ResourcePods] = api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value()))
|
||||
return usage, pods, nonRemovablePods, bePods, bPods, gPods
|
||||
return api.ResourceThresholds{
|
||||
v1.ResourceCPU: api.Percentage((float64(totalReqs[v1.ResourceCPU].MilliValue()) * 100) / float64(nodeCapacity.Cpu().MilliValue())),
|
||||
v1.ResourceMemory: api.Percentage(float64(totalReqs[v1.ResourceMemory].Value()) / float64(nodeCapacity.Memory().Value()) * 100),
|
||||
v1.ResourcePods: api.Percentage((float64(totalPods) * 100) / float64(nodeCapacity.Pods().Value())),
|
||||
}
|
||||
}
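To make the percentage arithmetic in nodeUtilization concrete, a worked example with illustrative numbers (not taken from this diff):

// Node capacity (or allocatable, when set): 4000m CPU, 8Gi memory, 10 pods.
// The pods on the node request 1200m CPU and 2Gi memory in total, across 4 pods.
// nodeUtilization would then report, as api.Percentage values:
//   CPU:    1200 * 100 / 4000 = 30
//   Memory: 2Gi  * 100 / 8Gi  = 25
//   Pods:   4    * 100 / 10   = 40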
|
||||
|
||||
func classifyPods(pods []*v1.Pod, evictLocalStoragePods bool) ([]*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
var nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods []*v1.Pod

// From https://kubernetes.io/docs/tasks/configure-pod-container/quality-service-pod/
//
// For a Pod to be given a QoS class of Guaranteed:
// - every Container in the Pod must have a memory limit and a memory request, and they must be the same.
// - every Container in the Pod must have a CPU limit and a CPU request, and they must be the same.
// A Pod is given a QoS class of Burstable if:
// - the Pod does not meet the criteria for QoS class Guaranteed.
// - at least one Container in the Pod has a memory or CPU request.
// For a Pod to be given a QoS class of BestEffort, the Containers in the Pod must not have any memory or CPU limits or requests.

for _, pod := range pods {
if !podutil.IsEvictable(pod, evictLocalStoragePods) {
nonRemovablePods = append(nonRemovablePods, pod)
continue
}

switch utils.GetPodQOS(pod) {
case v1.PodQOSGuaranteed:
guaranteedPods = append(guaranteedPods, pod)
case v1.PodQOSBurstable:
burstablePods = append(burstablePods, pod)
default: // alias v1.PodQOSBestEffort
bestEffortPods = append(bestEffortPods, pod)
}
}

return nonRemovablePods, bestEffortPods, burstablePods, guaranteedPods
}
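A small sketch of the QoS rules quoted above, using a hypothetical pod spec: equal CPU and memory requests and limits on every container yield the Guaranteed class, so classifyPods would put such a pod into the guaranteed bucket (provided it is evictable).

// Hypothetical example pod, not part of this diff.
var guaranteedExample = &v1.Pod{
	Spec: v1.PodSpec{
		Containers: []v1.Container{{
			Name: "app",
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("128Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("128Mi"),
				},
			},
		}},
	},
}

// utils.GetPodQOS(guaranteedExample) == v1.PodQOSGuaranteed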
|
||||
|
||||
@@ -23,249 +23,380 @@ import (
|
||||
|
||||
"reflect"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/policy/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
// TODO: Make this table driven.
|
||||
func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
|
||||
var thresholds = make(api.ResourceThresholds)
|
||||
var targetThresholds = make(api.ResourceThresholds)
|
||||
thresholds[v1.ResourceCPU] = 30
|
||||
thresholds[v1.ResourcePods] = 30
|
||||
targetThresholds[v1.ResourceCPU] = 50
|
||||
targetThresholds[v1.ResourcePods] = 50
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
)
|
||||
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
||||
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
|
||||
// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
|
||||
n3.Spec.Unschedulable = true
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||
|
||||
// These won't be evicted.
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p7.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p7.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p8.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
p8.Spec.Priority = &priority
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
||||
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, "n1") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n2") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n3") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{}}, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.GetAction)
|
||||
switch getAction.GetName() {
|
||||
case n1.Name:
|
||||
return true, n1, nil
|
||||
case n2.Name:
|
||||
return true, n2, nil
|
||||
case n3.Name:
|
||||
return true, n3, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
})
|
||||
expectedPodsEvicted := 3
|
||||
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
|
||||
if len(lowNodes) != 1 {
|
||||
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||
}
|
||||
npe := utils.NodePodEvictedCount{}
|
||||
npe[n1] = 0
|
||||
npe[n2] = 0
|
||||
npe[n3] = 0
|
||||
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
|
||||
if expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
func setRSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList() }
func setDSOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList() }
func setNormalOwnerRef(pod *v1.Pod) { pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList() }
func setHighPriority(pod *v1.Pod) { pod.Spec.Priority = &highPriority }
func setLowPriority(pod *v1.Pod) { pod.Spec.Priority = &lowPriority }
func setNodeUnschedulable(node *v1.Node) { node.Spec.Unschedulable = true }

// makeBestEffortPod clears both requests and limits so the pod is classified as BestEffort.
func makeBestEffortPod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Requests = nil
pod.Spec.Containers[0].Resources.Limits = nil
}
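// These mutators are intended to be passed as the trailing "apply" argument of the updated
// test.BuildTestPod/test.BuildTestNode helpers, and they compose. A sketch mirroring the
// table-driven cases below:
//
//	test.BuildTestPod("p1", 400, 0, "n1", func(pod *v1.Pod) {
//		setRSOwnerRef(pod)
//		makeBestEffortPod(pod)
//	})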
|
||||
|
||||
// TODO: Make this table driven.
|
||||
func TestLowNodeUtilizationWithPriorities(t *testing.T) {
|
||||
var thresholds = make(api.ResourceThresholds)
|
||||
var targetThresholds = make(api.ResourceThresholds)
|
||||
thresholds[v1.ResourceCPU] = 30
|
||||
thresholds[v1.ResourcePods] = 30
|
||||
targetThresholds[v1.ResourceCPU] = 50
|
||||
targetThresholds[v1.ResourcePods] = 50
|
||||
lowPriority := int32(0)
|
||||
highPriority := int32(10000)
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
|
||||
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
|
||||
// Making node n3 unschedulable so that it won't be counted in the lowUtilized nodes list.
|
||||
n3.Spec.Unschedulable = true
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
p1.Spec.Priority = &highPriority
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p2.Spec.Priority = &highPriority
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p3.Spec.Priority = &highPriority
|
||||
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
|
||||
p4.Spec.Priority = &highPriority
|
||||
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
|
||||
p5.Spec.Priority = &lowPriority
|
||||
// makeBurstablePod clears limits while keeping requests so the pod is classified as Burstable.
func makeBurstablePod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Limits = nil
}
|
||||
|
||||
// These won't be evicted.
|
||||
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
|
||||
p6.Spec.Priority = &highPriority
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
|
||||
p7.Spec.Priority = &lowPriority
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
|
||||
p8.Spec.Priority = &lowPriority
|
||||
// makeGuaranteedPod copies each request into the matching limit so the pod is classified as Guaranteed.
func makeGuaranteedPod(pod *v1.Pod) {
pod.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU]
pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory]
}
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
// The following 4 pods won't get evicted.
|
||||
// A daemonset.
|
||||
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p7.Spec.Volumes = []v1.Volume{
|
||||
func TestLowNodeUtilization(t *testing.T) {
|
||||
n1NodeName := "n1"
|
||||
n2NodeName := "n2"
|
||||
n3NodeName := "n3"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
thresholds, targetThresholds api.ResourceThresholds
|
||||
nodes map[string]*v1.Node
|
||||
pods map[string]*v1.PodList
|
||||
expectedPodsEvicted int
|
||||
evictedPods []string
|
||||
}{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
name: "without priorities",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, setDSOwnerRef),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
setNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "with priorities",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
|
||||
},
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
setLowPriority(pod)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setDSOwnerRef(pod)
|
||||
setHighPriority(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
setNormalOwnerRef(pod)
|
||||
setLowPriority(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "without priorities evicting best-effort pods only",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
nodes: map[string]*v1.Node{
|
||||
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, setNodeUnschedulable),
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestPod always sets both cpu/memory resource requests to some value)
|
||||
pods: map[string]*v1.PodList{
|
||||
n1NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setRSOwnerRef(pod)
|
||||
makeBestEffortPod(pod)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
*test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
setDSOwnerRef(pod)
|
||||
}),
|
||||
*test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
setNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
*test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
},
|
||||
n2NodeName: {
|
||||
Items: []v1.Pod{
|
||||
*test.BuildTestPod("p9", 400, 0, n1NodeName, setRSOwnerRef),
|
||||
},
|
||||
},
|
||||
n3NodeName: {},
|
||||
},
|
||||
expectedPodsEvicted: 4,
|
||||
evictedPods: []string{"p1", "p2", "p4", "p5"},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p7.Annotations = test.GetMirrorPodAnnotation()
|
||||
// A Critical Pod.
|
||||
p8.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
p8.Spec.Priority = &priority
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
|
||||
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, "n1") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n2") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
|
||||
}
|
||||
if strings.Contains(fieldString, "n3") {
|
||||
return true, &v1.PodList{Items: []v1.Pod{}}, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.GetAction)
|
||||
switch getAction.GetName() {
|
||||
case n1.Name:
|
||||
return true, n1, nil
|
||||
case n2.Name:
|
||||
return true, n2, nil
|
||||
case n3.Name:
|
||||
return true, n3, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
})
|
||||
expectedPodsEvicted := 3
|
||||
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
|
||||
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
|
||||
if len(lowNodes) != 1 {
|
||||
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||
}
|
||||
npe := utils.NodePodEvictedCount{}
|
||||
npe[n1] = 0
|
||||
npe[n2] = 0
|
||||
npe[n3] = 0
|
||||
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
|
||||
if expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
list := action.(core.ListAction)
|
||||
fieldString := list.GetListRestrictions().Fields.String()
|
||||
if strings.Contains(fieldString, n1NodeName) {
|
||||
return true, test.pods[n1NodeName], nil
|
||||
}
|
||||
if strings.Contains(fieldString, n2NodeName) {
|
||||
return true, test.pods[n2NodeName], nil
|
||||
}
|
||||
if strings.Contains(fieldString, n3NodeName) {
|
||||
return true, test.pods[n3NodeName], nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Failed to list: %v", list)
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.GetAction)
|
||||
if node, exists := test.nodes[getAction.GetName()]; exists {
|
||||
return true, node, nil
|
||||
}
|
||||
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
|
||||
})
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range test.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
}
|
||||
|
||||
evictionFailed := false
|
||||
if len(test.evictedPods) > 0 {
|
||||
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.CreateAction)
|
||||
obj := getAction.GetObject()
|
||||
if eviction, ok := obj.(*v1beta1.Eviction); ok {
|
||||
if _, exists := podsForEviction[eviction.Name]; exists {
|
||||
return true, obj, nil
|
||||
}
|
||||
evictionFailed = true
|
||||
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
}
|
||||
|
||||
var nodes []*v1.Node
|
||||
for _, node := range test.nodes {
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
||||
npm := createNodePodsMap(fakeClient, nodes)
|
||||
lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
|
||||
if len(lowNodes) != 1 {
|
||||
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
|
||||
}
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
test.expectedPodsEvicted,
|
||||
nodes,
|
||||
)
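// The arguments above are read here as: client, eviction policy group version, dry-run flag,
// per-node eviction limit (this test reuses expectedPodsEvicted as the cap), and the nodes
// the evictor may act on; this ordering is inferred from the other NewPodEvictor call sites
// in this diff.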
|
||||
|
||||
evictPodsFromTargetNodes(targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if test.expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
if evictionFailed {
|
||||
t.Errorf("Pod evictions failed unexpectedly")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortPodsByPriority(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
|
||||
lowPriority := int32(0)
|
||||
highPriority := int32(10000)
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
|
||||
p1.Spec.Priority = &lowPriority
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, setLowPriority)
|
||||
|
||||
// BestEffort
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
|
||||
p2.Spec.Priority = &highPriority
|
||||
|
||||
p2.Spec.Containers[0].Resources.Requests = nil
|
||||
p2.Spec.Containers[0].Resources.Limits = nil
|
||||
p2 := test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
setHighPriority(pod)
|
||||
makeBestEffortPod(pod)
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
|
||||
p3.Spec.Priority = &highPriority
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
setHighPriority(pod)
|
||||
makeBurstablePod(pod)
|
||||
})
|
||||
|
||||
// Guaranteed
|
||||
p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
|
||||
p4.Spec.Priority = &highPriority
|
||||
p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
|
||||
p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
|
||||
p4 := test.BuildTestPod("p4", 400, 100, n1.Name, func(pod *v1.Pod) {
|
||||
setHighPriority(pod)
|
||||
makeGuaranteedPod(pod)
|
||||
})
|
||||
|
||||
// Best effort with nil priorities.
|
||||
p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
|
||||
p5 := test.BuildTestPod("p5", 400, 100, n1.Name, makeBestEffortPod)
|
||||
p5.Spec.Priority = nil
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
|
||||
p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
|
||||
p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
|
||||
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, makeGuaranteedPod)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
@@ -327,3 +458,182 @@ func TestValidateThresholds(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func newFake(objects ...runtime.Object) *core.Fake {
|
||||
scheme := runtime.NewScheme()
|
||||
codecs := serializer.NewCodecFactory(scheme)
|
||||
fake.AddToScheme(scheme)
|
||||
o := core.NewObjectTracker(scheme, codecs.UniversalDecoder())
|
||||
for _, obj := range objects {
|
||||
if err := o.Add(obj); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
fakePtr := core.Fake{}
|
||||
fakePtr.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
objs, err := o.List(
|
||||
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
|
||||
schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
|
||||
action.GetNamespace(),
|
||||
)
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
|
||||
obj := &v1.PodList{
|
||||
Items: []v1.Pod{},
|
||||
}
|
||||
for _, pod := range objs.(*v1.PodList).Items {
|
||||
podFieldSet := fields.Set(map[string]string{
|
||||
"spec.nodeName": pod.Spec.NodeName,
|
||||
"status.phase": string(pod.Status.Phase),
|
||||
})
|
||||
match := action.(core.ListAction).GetListRestrictions().Fields.Matches(podFieldSet)
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
obj.Items = append(obj.Items, *pod.DeepCopy())
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
fakePtr.AddReactor("*", "*", core.ObjectReaction(o))
|
||||
fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
|
||||
|
||||
return &fakePtr
|
||||
}
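// A usage sketch matching the taint test below: wrap the returned core.Fake in a
// fake.Clientset so the strategies can list pods by spec.nodeName through the
// field-selector-aware reactor registered above (node1 and pod1 are placeholder objects):
//
//	fakePtr := newFake(node1, pod1)
//	client := &fake.Clientset{Fake: *fakePtr}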
|
||||
|
||||
func TestWithTaints(t *testing.T) {
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 70,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 1000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 1000, 3000, 10, nil)
|
||||
n3withTaints := n3.DeepCopy()
|
||||
n3withTaints.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
|
||||
podThatToleratesTaint := test.BuildTestPod("tolerate_pod", 200, 0, n1.Name, setRSOwnerRef)
|
||||
podThatToleratesTaint.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
evictionsExpected int
|
||||
}{
|
||||
{
|
||||
name: "No taints",
|
||||
nodes: []*v1.Node{n1, n2, n3},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
// Node 2 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n2.Name), 200, 0, n2.Name, setRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
{
|
||||
name: "No pod tolerates node taint",
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_8_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 0,
|
||||
},
|
||||
{
|
||||
name: "Pod which tolerates node taint",
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
//Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, setRSOwnerRef),
|
||||
podThatToleratesTaint,
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, setRSOwnerRef),
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range tests {
|
||||
t.Run(item.name, func(t *testing.T) {
|
||||
var objs []runtime.Object
|
||||
for _, node := range item.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
|
||||
for _, pod := range item.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakePtr := newFake(objs...)
|
||||
var evictionCounter int
|
||||
fakePtr.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() != "eviction" || action.GetResource().Resource != "pods" {
|
||||
return false, nil, nil
|
||||
}
|
||||
evictionCounter++
|
||||
return true, nil, nil
|
||||
})
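// Eviction requests reach the fake client as "create" actions on the pods/eviction
// subresource, so the reactor above only counts them and short-circuits instead of
// mutating the object tracker.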
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
&fake.Clientset{Fake: *fakePtr},
|
||||
"policy/v1",
|
||||
false,
|
||||
item.evictionsExpected,
|
||||
item.nodes,
|
||||
)
|
||||
|
||||
LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
|
||||
|
||||
if item.evictionsExpected != evictionCounter {
|
||||
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,26 +18,16 @@ package strategies
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
|
||||
removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||
}
|
||||
|
||||
func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||
evictedPodCount := 0
|
||||
if !strategy.Enabled {
|
||||
return evictedPodCount
|
||||
}
|
||||
|
||||
func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
|
||||
klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
|
||||
|
||||
@@ -46,31 +36,25 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %#v\n", node.Name)
|
||||
|
||||
pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
|
||||
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
|
||||
break
|
||||
}
|
||||
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
|
||||
if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
|
||||
klog.V(1).Infof("Evicting pod: %v", pod.Name)
|
||||
evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun)
|
||||
nodepodCount[node]++
|
||||
if _, err := podEvictor.EvictPod(pod, node); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
evictedPodCount += nodepodCount[node]
|
||||
}
|
||||
default:
|
||||
klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
|
||||
return evictedPodCount
|
||||
}
|
||||
}
|
||||
klog.V(1).Infof("Evicted %v pods", evictedPodCount)
|
||||
return evictedPodCount
|
||||
klog.V(1).Infof("Evicted %v pods", podEvictor.TotalEvicted())
|
||||
}
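// A call sketch for the refactored signature, assuming a podEvictor constructed the same
// way as in the tests below:
//
//	RemovePodsViolatingNodeAffinity(client, strategy, nodes, evictLocalStoragePods, podEvictor)
//	klog.V(1).Infof("total evicted: %v", podEvictor.TotalEvicted())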
|
||||
|
||||
@@ -23,9 +23,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -42,17 +41,17 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
|
||||
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
|
||||
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||
|
||||
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)
|
||||
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
|
||||
|
||||
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
|
||||
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
|
||||
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||
unschedulableNodeWithLabels.Spec.Unschedulable = true
|
||||
|
||||
addPodsToNode := func(node *v1.Node) []v1.Pod {
|
||||
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
|
||||
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
|
||||
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
@@ -73,8 +72,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
|
||||
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)
|
||||
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
|
||||
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)
|
||||
|
||||
podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -93,25 +92,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
pods []v1.Pod
|
||||
strategy api.DeschedulerStrategy
|
||||
expectedEvictedPodCount int
|
||||
npe utils.NodePodEvictedCount
|
||||
maxPodsToEvict int
|
||||
}{
|
||||
{
|
||||
description: "Strategy disabled, should not evict any pods",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: false,
|
||||
Params: api.StrategyParameters{
|
||||
NodeAffinityType: []string{
|
||||
"requiredDuringSchedulingIgnoredDuringExecution",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Invalid strategy type, should not evict any pods",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
@@ -125,7 +107,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
@@ -134,7 +115,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithLabels),
|
||||
nodes: []*v1.Node{nodeWithLabels},
|
||||
npe: utils.NodePodEvictedCount{nodeWithLabels: 0},
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
@@ -143,7 +123,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
@@ -152,7 +131,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
|
||||
maxPodsToEvict: 1,
|
||||
},
|
||||
{
|
||||
@@ -161,7 +139,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
|
||||
pods: addPodsToNode(nodeWithoutLabels),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||
npe: utils.NodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
}
|
||||
@@ -173,11 +150,16 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
return true, &v1.PodList{Items: tc.pods}, nil
|
||||
})
|
||||
|
||||
ds := options.DeschedulerServer{
|
||||
Client: fakeClient,
|
||||
}
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
tc.nodes,
|
||||
)
|
||||
|
||||
actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict, false)
|
||||
RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -28,112 +27,27 @@ import (
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
TolerationOpExists v1.TolerationOperator = "Exists"
|
||||
TolerationOpEqual v1.TolerationOperator = "Equal"
|
||||
)
|
||||
|
||||
// RemovePodsViolatingNodeTaints with elimination strategy
|
||||
func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
|
||||
if !strategy.Enabled {
|
||||
return
|
||||
}
|
||||
deletePodsViolatingNodeTaints(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||
}
|
||||
|
||||
// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
|
||||
func deletePodsViolatingNodeTaints(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||
podsEvicted := 0
|
||||
// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
|
||||
func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %#v\n", node.Name)
|
||||
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
//no pods evicted as error encountered retrieving evictable Pods
|
||||
return 0
|
||||
return
|
||||
}
|
||||
totalPods := len(pods)
|
||||
for i := 0; i < totalPods; i++ {
|
||||
if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
|
||||
break
|
||||
}
|
||||
if !checkPodsSatisfyTolerations(pods[i], node) {
|
||||
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
|
||||
if !success {
|
||||
klog.Errorf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
|
||||
} else {
|
||||
nodePodCount[node]++
|
||||
klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
|
||||
if !utils.TolerationsTolerateTaintsWithFilter(
|
||||
pods[i].Spec.Tolerations,
|
||||
node.Spec.Taints,
|
||||
func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
|
||||
) {
|
||||
klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
|
||||
if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
podsEvicted += nodePodCount[node]
|
||||
}
|
||||
return podsEvicted
|
||||
}
|
||||
|
||||
// checkPodsSatisfyTolerations checks if the node's taints (NoSchedule) are still satisfied by pods' tolerations.
|
||||
func checkPodsSatisfyTolerations(pod *v1.Pod, node *v1.Node) bool {
|
||||
tolerations := pod.Spec.Tolerations
|
||||
taints := node.Spec.Taints
|
||||
if len(taints) == 0 {
|
||||
return true
|
||||
}
|
||||
noScheduleTaints := getNoScheduleTaints(taints)
|
||||
if !allTaintsTolerated(noScheduleTaints, tolerations) {
|
||||
klog.V(2).Infof("Not all taints are tolerated after update for Pod %v on node %v", pod.Name, node.Name)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getNoScheduleTaints returns a slice of NoSchedule taints from the slice of taints that it receives.
|
||||
func getNoScheduleTaints(taints []v1.Taint) []v1.Taint {
|
||||
result := []v1.Taint{}
|
||||
for i := range taints {
|
||||
if taints[i].Effect == v1.TaintEffectNoSchedule {
|
||||
result = append(result, taints[i])
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
//toleratesTaint returns true if a toleration tolerates a taint, or false otherwise
|
||||
func toleratesTaint(toleration *v1.Toleration, taint *v1.Taint) bool {
|
||||
|
||||
if (len(toleration.Key) > 0 && toleration.Key != taint.Key) ||
|
||||
(len(toleration.Effect) > 0 && toleration.Effect != taint.Effect) {
|
||||
return false
|
||||
}
|
||||
switch toleration.Operator {
|
||||
// empty operator means Equal
|
||||
case "", TolerationOpEqual:
|
||||
return toleration.Value == taint.Value
|
||||
case TolerationOpExists:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// allTaintsTolerated returns true if all are tolerated, or false otherwise.
|
||||
func allTaintsTolerated(taints []v1.Taint, tolerations []v1.Toleration) bool {
|
||||
if len(taints) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(tolerations) == 0 && len(taints) > 0 {
|
||||
return false
|
||||
}
|
||||
for i := range taints {
|
||||
tolerated := false
|
||||
for j := range tolerations {
|
||||
if toleratesTaint(&tolerations[j], &taints[i]) {
|
||||
tolerated = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !tolerated {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
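// For example, under the matching rules above, a toleration with the Exists operator
// ignores the taint's value, so the pair below is tolerated:
//
//	toleration := v1.Toleration{Key: "key", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}
//	taint := v1.Taint{Key: "key", Value: "anything", Effect: v1.TaintEffectNoSchedule}
//	tolerated := toleratesTaint(&toleration, &taint) // true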
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -42,17 +44,17 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
|
||||
|
||||
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10)
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -60,11 +62,11 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node2.Name)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node2.Name)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node2.Name)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node2.Name)
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node2.Name)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
|
||||
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
// The following 4 pods won't get evicted.
|
||||
@@ -99,7 +101,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
nodes []*v1.Node
|
||||
pods []v1.Pod
|
||||
evictLocalStoragePods bool
|
||||
npe utils.NodePodEvictedCount
|
||||
maxPodsToEvict int
|
||||
expectedEvictedPodCount int
|
||||
}{
|
||||
@@ -109,7 +110,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p2, *p3},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
npe: utils.NodePodEvictedCount{node1: 0},
|
||||
maxPodsToEvict: 0,
|
||||
expectedEvictedPodCount: 1, //p2 gets evicted
|
||||
},
|
||||
@@ -118,7 +118,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p3, *p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
npe: utils.NodePodEvictedCount{node1: 0},
|
||||
maxPodsToEvict: 0,
|
||||
expectedEvictedPodCount: 1, //p4 gets evicted
|
||||
},
|
||||
@@ -127,7 +126,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p1, *p5, *p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
npe: utils.NodePodEvictedCount{node1: 0},
|
||||
maxPodsToEvict: 1,
|
||||
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
|
||||
},
|
||||
@@ -136,7 +134,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
npe: utils.NodePodEvictedCount{node2: 0},
|
||||
maxPodsToEvict: 0,
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
@@ -145,7 +142,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p9, *p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: true,
|
||||
npe: utils.NodePodEvictedCount{node2: 0},
|
||||
maxPodsToEvict: 0,
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
@@ -154,7 +150,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
pods: []v1.Pod{*p7, *p8, *p10, *p11},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
npe: utils.NodePodEvictedCount{node2: 0},
|
||||
maxPodsToEvict: 0,
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
@@ -168,7 +163,16 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
return true, &v1.PodList{Items: tc.pods}, nil
|
||||
})
|
||||
|
||||
actualEvictedPodCount := deletePodsViolatingNodeTaints(fakeClient, "v1", tc.nodes, false, tc.npe, tc.maxPodsToEvict, tc.evictLocalStoragePods)
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
tc.nodes,
|
||||
)
|
||||
|
||||
RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
|
||||
}
|
||||
@@ -188,7 +192,7 @@ func TestToleratesTaint(t *testing.T) {
|
||||
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has no value, expect tolerated",
|
||||
toleration: v1.Toleration{
|
||||
Key: "foo",
|
||||
Operator: TolerationOpExists,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
taint: v1.Taint{
|
||||
@@ -201,7 +205,7 @@ func TestToleratesTaint(t *testing.T) {
|
||||
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has some value, expect tolerated",
|
||||
toleration: v1.Toleration{
|
||||
Key: "foo",
|
||||
Operator: TolerationOpExists,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
taint: v1.Taint{
|
||||
@@ -215,7 +219,7 @@ func TestToleratesTaint(t *testing.T) {
|
||||
description: "toleration and taint have the same effect, toleration has empty key and operator is Exists, means match all taints, expect tolerated",
|
||||
toleration: v1.Toleration{
|
||||
Key: "",
|
||||
Operator: TolerationOpExists,
|
||||
Operator: v1.TolerationOpExists,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
taint: v1.Taint{
|
||||
@@ -229,7 +233,7 @@ func TestToleratesTaint(t *testing.T) {
|
||||
description: "toleration and taint have the same key, effect and value, and operator is Equal, expect tolerated",
|
||||
toleration: v1.Toleration{
|
||||
Key: "foo",
|
||||
Operator: TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "bar",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
@@ -244,7 +248,7 @@ func TestToleratesTaint(t *testing.T) {
|
||||
description: "toleration and taint have the same key and effect, but different values, and operator is Equal, expect not tolerated",
|
||||
toleration: v1.Toleration{
|
||||
Key: "foo",
|
||||
Operator: TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "value1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
@@ -259,7 +263,7 @@ func TestToleratesTaint(t *testing.T) {
|
||||
description: "toleration and taint have the same key and value, but different effects, and operator is Equal, expect not tolerated",
|
||||
toleration: v1.Toleration{
|
||||
Key: "foo",
|
||||
Operator: TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "bar",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
@@ -272,27 +276,8 @@ func TestToleratesTaint(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
if tolerated := toleratesTaint(&tc.toleration, &tc.taint); tc.expectTolerated != tolerated {
|
||||
if tolerated := tc.toleration.ToleratesTaint(&tc.taint); tc.expectTolerated != tolerated {
|
||||
t.Errorf("[%s] expect %v, got %v: toleration %+v, taint %s", tc.description, tc.expectTolerated, tolerated, tc.toleration, tc.taint.ToString())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterNoExecuteTaints(t *testing.T) {
|
||||
taints := []v1.Taint{
|
||||
{
|
||||
Key: "one",
|
||||
Value: "one",
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
{
|
||||
Key: "two",
|
||||
Value: "two",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
taints = getNoScheduleTaints(taints)
|
||||
if len(taints) != 1 || taints[0].Key != "two" {
|
||||
t.Errorf("Filtering doesn't work. Got %v", taints)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -29,35 +28,24 @@ import (
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
|
||||
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount utils.NodePodEvictedCount) {
|
||||
if !strategy.Enabled {
|
||||
return
|
||||
}
|
||||
removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
|
||||
}
|
||||
|
||||
// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
|
||||
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount utils.NodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
|
||||
podsEvicted := 0
|
||||
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node that violate inter-pod anti-affinity rules.
|
||||
func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %#v\n", node.Name)
|
||||
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
return 0
|
||||
return
|
||||
}
|
||||
totalPods := len(pods)
|
||||
for i := 0; i < totalPods; i++ {
|
||||
if maxPodsToEvict > 0 && nodePodCount[node]+1 > maxPodsToEvict {
|
||||
break
|
||||
}
|
||||
if checkPodsWithAntiAffinityExist(pods[i], pods) {
|
||||
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
|
||||
if !success {
|
||||
klog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
|
||||
} else {
|
||||
nodePodCount[node]++
|
||||
klog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
|
||||
success, err := podEvictor.EvictPod(pods[i], node)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if success {
|
||||
klog.V(1).Infof("Evicted pod: %#v\n because of existing anti-affinity", pods[i].Name)
|
||||
// Since the current pod is evicted all other pods which have anti-affinity with this
|
||||
// pod need not be evicted.
|
||||
// Update pods.
|
||||
@@ -67,9 +55,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
|
||||
}
|
||||
}
|
||||
}
|
||||
podsEvicted += nodePodCount[node]
|
||||
}
|
||||
return podsEvicted
|
||||
}
|
||||
|
||||
// checkPodsWithAntiAffinityExist checks if there are other pods on the node that the current pod cannot tolerate.
|
||||
|
||||
@@ -24,16 +24,17 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestPodAntiAffinity(t *testing.T) {
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name)
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
||||
p2.Labels = map[string]string{"foo": "bar"}
|
||||
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -45,26 +46,49 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
setPodAntiAffinity(p3)
|
||||
setPodAntiAffinity(p4)
|
||||
|
||||
// create fake client
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
})
|
||||
npe := utils.NodePodEvictedCount{}
|
||||
npe[node] = 0
|
||||
expectedEvictedPodCount := 3
|
||||
podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
|
||||
if podsEvicted != expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
|
||||
tests := []struct {
|
||||
description string
|
||||
maxPodsToEvict int
|
||||
pods []v1.Pod
|
||||
expectedEvictedPodCount int
|
||||
}{
|
||||
{
|
||||
description: "Maximum pods to evict - 0",
|
||||
maxPodsToEvict: 0,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Maximum pods to evict - 3",
|
||||
maxPodsToEvict: 3,
|
||||
pods: []v1.Pod{*p1, *p2, *p3, *p4},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
}
|
||||
npe[node] = 0
|
||||
expectedEvictedPodCount = 1
|
||||
podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false)
|
||||
if podsEvicted != expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
|
||||
|
||||
for _, test := range tests {
|
||||
// create fake client
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4}}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
})
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
test.maxPodsToEvict,
|
||||
[]*v1.Node{node},
|
||||
)
|
||||
|
||||
RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != test.expectedEvictedPodCount {
|
||||
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
69 pkg/descheduler/strategies/pod_lifetime.go Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
// PodLifeTime evicts pods that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
|
||||
func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
if strategy.Params.MaxPodLifeTimeSeconds == nil {
|
||||
klog.V(1).Infof("MaxPodLifeTimeSeconds not set")
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %#v", node.Name)
|
||||
pods := listOldPodsOnNode(client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
|
||||
for _, pod := range pods {
|
||||
success, err := podEvictor.EvictPod(pod, node)
|
||||
if success {
|
||||
klog.V(1).Infof("Evicted pod: %#v\n because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("Error evicting pod: (%#v)", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func listOldPodsOnNode(client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
|
||||
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var oldPods []*v1.Pod
|
||||
for _, pod := range pods {
|
||||
podAgeSeconds := uint(v1meta.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
||||
if podAgeSeconds > maxAge {
|
||||
oldPods = append(oldPods, pod)
|
||||
}
|
||||
}
|
||||
|
||||
return oldPods
|
||||
}
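
For orientation only (not part of the diff): a minimal sketch, under stated assumptions, of how this new strategy could be driven end to end. The kubeconfig path is illustrative, and the node and eviction-API discovery mirrors the e2e updates further down in this change.

package main

import (
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)

func main() {
	// Client setup; the kubeconfig path is illustrative.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/admin.conf")
	if err != nil {
		klog.Fatalf("%v", err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Discover ready nodes through an informer, as the e2e changes below do.
	sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
	stopChannel := make(chan struct{})
	sharedInformerFactory.Start(stopChannel)
	sharedInformerFactory.WaitForCacheSync(stopChannel)
	defer close(stopChannel)

	nodes, err := nodeutil.ReadyNodes(client, nodeInformer, "", stopChannel)
	if err != nil {
		klog.Fatalf("%v", err)
	}

	// The eviction API group/version is discovered, not hard-coded.
	evictionPolicyGroupVersion, err := eutils.SupportEviction(client)
	if err != nil || len(evictionPolicyGroupVersion) == 0 {
		klog.Fatalf("eviction API not supported: %v", err)
	}

	// Evict pods that are older than one day (86400 seconds).
	var maxLifeTime uint = 86400
	strategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: api.StrategyParameters{
			MaxPodLifeTimeSeconds: &maxLifeTime,
		},
	}

	podEvictor := evictions.NewPodEvictor(
		client,
		evictionPolicyGroupVersion,
		false, // dryRun
		0,     // maxPodsToEvict per node; 0 means no limit
		nodes,
	)

	strategies.PodLifeTime(client, strategy, nodes, false, podEvictor)
}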
|
||||
167 pkg/descheduler/strategies/pod_lifetime_test.go Normal file
@@ -0,0 +1,167 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestPodLifeTime(t *testing.T) {
|
||||
node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
|
||||
newerPodCreationTime := metav1.NewTime(time.Now())
|
||||
|
||||
// Setup pods, one should be evicted
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
|
||||
p1.Namespace = "dev"
|
||||
p1.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
|
||||
p2.Namespace = "dev"
|
||||
p2.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
p1.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p2.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
// Setup pods, zero should be evicted
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node.Name, nil)
|
||||
p3.Namespace = "dev"
|
||||
p3.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node.Name, nil)
|
||||
p4.Namespace = "dev"
|
||||
p4.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
|
||||
ownerRef2 := test.GetReplicaSetOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = ownerRef2
|
||||
p4.ObjectMeta.OwnerReferences = ownerRef2
|
||||
|
||||
// Setup pods, one should be evicted
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node.Name, nil)
|
||||
p5.Namespace = "dev"
|
||||
p5.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node.Name, nil)
|
||||
p6.Namespace = "dev"
|
||||
p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 605))
|
||||
|
||||
ownerRef3 := test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p6.ObjectMeta.OwnerReferences = ownerRef3
|
||||
|
||||
// Setup pods, zero should be evicted
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node.Name, nil)
|
||||
p7.Namespace = "dev"
|
||||
p7.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node.Name, nil)
|
||||
p8.Namespace = "dev"
|
||||
p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 595))
|
||||
|
||||
ownerRef4 := test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = ownerRef4
|
||||
p6.ObjectMeta.OwnerReferences = ownerRef4
|
||||
|
||||
var maxLifeTime uint = 600
|
||||
testCases := []struct {
|
||||
description string
|
||||
strategy api.DeschedulerStrategy
|
||||
maxPodsToEvict int
|
||||
pods []v1.Pod
|
||||
expectedEvictedPodCount int
|
||||
}{
|
||||
{
|
||||
description: "Two pods in the `dev` Namespace, 1 is new and 1 is very old. 1 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
pods: []v1.Pod{*p1, *p2},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Two pods in the `dev` Namespace, 2 are new and 0 are old. 0 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
pods: []v1.Pod{*p3, *p4},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
description: "Two pods in the `dev` Namespace, 1 created 605 seconds ago. 1 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
pods: []v1.Pod{*p5, *p6},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Two pods in the `dev` Namespace, 1 created 595 seconds ago. 0 should be evicted.",
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: api.StrategyParameters{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
},
|
||||
maxPodsToEvict: 5,
|
||||
pods: []v1.Pod{*p7, *p8},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: tc.pods}, nil
|
||||
})
|
||||
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, node, nil
|
||||
})
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
[]*v1.Node{node},
|
||||
)
|
||||
|
||||
PodLifeTime(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if podsEvicted != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
74 pkg/descheduler/strategies/toomanyrestarts.go Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
|
||||
// There are many cases that can lead to this issue: a failed volume mount, an application error caused by differing node settings, and so on.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods, or pods with local storage.
|
||||
func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
|
||||
if strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
|
||||
klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set")
|
||||
return
|
||||
}
|
||||
for _, node := range nodes {
|
||||
klog.V(1).Infof("Processing node: %s", node.Name)
|
||||
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
|
||||
if err != nil {
|
||||
klog.Errorf("Error when list pods at node %s", node.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
for i, pod := range pods {
|
||||
restarts, initRestarts := calcContainerRestarts(pod)
|
||||
if strategy.Params.PodsHavingTooManyRestarts.IncludingInitContainers {
|
||||
if restarts+initRestarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
|
||||
continue
|
||||
}
|
||||
} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
|
||||
continue
|
||||
}
|
||||
if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// calcContainerRestarts get container restarts and init container restarts.
|
||||
func calcContainerRestarts(pod *v1.Pod) (int32, int32) {
|
||||
var restarts, initRestarts int32
|
||||
|
||||
for _, cs := range pod.Status.ContainerStatuses {
|
||||
restarts += cs.RestartCount
|
||||
}
|
||||
|
||||
for _, cs := range pod.Status.InitContainerStatuses {
|
||||
initRestarts += cs.RestartCount
|
||||
}
|
||||
|
||||
return restarts, initRestarts
|
||||
}
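
To make the restart accounting concrete, a small test-style sketch that would sit inside a test in this same strategies package; the pod and restart counts are hypothetical.

pod := test.BuildTestPod("restarty", 100, 0, "node1", func(p *v1.Pod) {
	p.Status = v1.PodStatus{
		InitContainerStatuses: []v1.ContainerStatus{{RestartCount: 3}},
		ContainerStatuses:     []v1.ContainerStatus{{RestartCount: 7}, {RestartCount: 5}},
	}
})

restarts, initRestarts := calcContainerRestarts(pod) // restarts = 12, initRestarts = 3

// With PodRestartThreshold = 14:
//   IncludingInitContainers = true  -> 12+3 >= 14, the pod becomes an eviction candidate
//   IncludingInitContainers = false -> 12 < 14, the pod is skipped
_, _ = restarts, initRestarts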
|
||||
181 pkg/descheduler/strategies/toomanyrestarts_test.go Normal file
@@ -0,0 +1,181 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package strategies
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func initPods(node *v1.Node) []v1.Pod {
|
||||
pods := make([]v1.Pod, 0)
|
||||
|
||||
for i := int32(0); i <= 9; i++ {
|
||||
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
// pod at index i will have 25 * i restarts.
|
||||
pod.Status = v1.PodStatus{
|
||||
InitContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
RestartCount: 5 * i,
|
||||
},
|
||||
},
|
||||
ContainerStatuses: []v1.ContainerStatus{
|
||||
{
|
||||
RestartCount: 10 * i,
|
||||
},
|
||||
{
|
||||
RestartCount: 10 * i,
|
||||
},
|
||||
},
|
||||
}
|
||||
pods = append(pods, *pod)
|
||||
}
|
||||
|
||||
// The following 3 pods won't get evicted.
|
||||
// A daemonset.
|
||||
pods[6].ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
pods[7].ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pods[7].Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pods[8].Annotations = test.GetMirrorPodAnnotation()
|
||||
|
||||
return pods
|
||||
}
|
||||
|
||||
func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
|
||||
return api.DeschedulerStrategy{
|
||||
Enabled: enabled,
|
||||
Params: api.StrategyParameters{
|
||||
PodsHavingTooManyRestarts: &api.PodsHavingTooManyRestarts{
|
||||
PodRestartThreshold: restartThresholds,
|
||||
IncludingInitContainers: includingInitContainers,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pods []v1.Pod
|
||||
strategy api.DeschedulerStrategy
|
||||
expectedEvictedPodCount int
|
||||
maxPodsToEvict int
|
||||
}{
|
||||
{
|
||||
description: "All pods have total restarts under threshold, no pod evictions",
|
||||
strategy: createStrategy(true, true, 10000),
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Some pods have total restarts bigger than threshold",
|
||||
strategy: createStrategy(true, true, 1),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
|
||||
strategy: createStrategy(true, true, 1*25),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pods evictions",
|
||||
strategy: createStrategy(true, false, 1*25),
|
||||
expectedEvictedPodCount: 5,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pods evictions",
|
||||
strategy: createStrategy(true, true, 1*20),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pods evictions",
|
||||
strategy: createStrategy(true, false, 1*20),
|
||||
expectedEvictedPodCount: 6,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
|
||||
strategy: createStrategy(true, true, 5*25+1),
|
||||
expectedEvictedPodCount: 1,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
|
||||
strategy: createStrategy(true, false, 5*20+1),
|
||||
expectedEvictedPodCount: 1,
|
||||
maxPodsToEvict: 0,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvict=3), 3 pods evictions",
|
||||
strategy: createStrategy(true, true, 1),
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvict: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
|
||||
pods := initPods(node)
|
||||
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: pods}, nil
|
||||
})
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
tc.maxPodsToEvict,
|
||||
[]*v1.Node{node},
|
||||
)
|
||||
|
||||
RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
|
||||
actualEvictedPodCount := podEvictor.TotalEvicted()
|
||||
if actualEvictedPodCount != tc.expectedEvictedPodCount {
|
||||
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2,10 +2,12 @@ package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -179,3 +181,15 @@ func maxResourceList(list, new v1.ResourceList) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PodToleratesTaints returns true if the pod tolerates all taints of at least one of the given nodes
|
||||
func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
|
||||
for nodeName, taintsForNode := range taintsOfNodes {
|
||||
if len(pod.Spec.Tolerations) >= len(taintsForNode) && TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
|
||||
return true
|
||||
}
|
||||
klog.V(5).Infof("pod: %#v doesn't tolerate node %s's taints", pod.Name, nodeName)
|
||||
}
|
||||
|
||||
return false
|
||||
}
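
A small illustrative use of the new helper; the pod, node names, and taints below are hypothetical.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/utils"
)

func main() {
	// Hypothetical pod that only tolerates the "dedicated=infra:NoSchedule" taint.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Tolerations: []v1.Toleration{
				{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "infra", Effect: v1.TaintEffectNoSchedule},
			},
		},
	}

	taintsOfNodes := map[string][]v1.Taint{
		"node-a": {{Key: "dedicated", Value: "infra", Effect: v1.TaintEffectNoSchedule}},
		"node-b": {{Key: "gpu", Value: "true", Effect: v1.TaintEffectNoSchedule}},
	}

	// Prints "true": the pod tolerates every taint of at least one node (node-a).
	fmt.Println(utils.PodToleratesTaints(pod, taintsOfNodes))
}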
|
||||
|
||||
@@ -19,7 +19,7 @@ package utils
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/klog"
|
||||
@@ -126,3 +126,35 @@ func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (label
|
||||
}
|
||||
return selector, nil
|
||||
}
|
||||
|
||||
// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations.
|
||||
func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool {
|
||||
for i := range tolerations {
|
||||
if tolerations[i].ToleratesTaint(taint) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type taintsFilterFunc func(*v1.Taint) bool
|
||||
|
||||
// TolerationsTolerateTaintsWithFilter checks whether the given tolerations tolerate
// all the taints in the given taint list that pass the filter.
|
||||
func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool {
|
||||
if len(taints) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
for i := range taints {
|
||||
if applyFilter != nil && !applyFilter(&taints[i]) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !TolerationsTolerateTaint(tolerations, &taints[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
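
As a sketch of how the filter argument can be used: a hypothetical helper, assumed to live in this same utils package, that only considers the taint effects which actually block pods.

// nodeTaintsTolerated reports whether the pod tolerates every NoSchedule/NoExecute
// taint on the node; taints with other effects (e.g. PreferNoSchedule) are ignored.
func nodeTaintsTolerated(pod *v1.Pod, node *v1.Node) bool {
	filter := func(t *v1.Taint) bool {
		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
	}
	return TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, filter)
}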
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import v1 "k8s.io/api/core/v1"
|
||||
|
||||
// This file contains the datastructures, types & functions needed by all the strategies so that we don't have
|
||||
// to compute them again in each strategy.
|
||||
|
||||
// NodePodEvictedCount keeps count of pods evicted on node. This is used in conjunction with strategies to
|
||||
type NodePodEvictedCount map[*v1.Node]int
|
||||
|
||||
// InitializeNodePodCount initializes the nodePodCount.
|
||||
func InitializeNodePodCount(nodeList []*v1.Node) NodePodEvictedCount {
|
||||
var nodePodCount = make(NodePodEvictedCount)
|
||||
for _, node := range nodeList {
|
||||
// Initialize podsEvicted till now with 0.
|
||||
nodePodCount[node] = 0
|
||||
}
|
||||
return nodePodCount
|
||||
}
|
||||
@@ -24,6 +24,8 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
@@ -31,11 +33,11 @@ import (
|
||||
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
func MakePodSpec() v1.PodSpec {
|
||||
@@ -93,7 +95,7 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
|
||||
}
|
||||
|
||||
// startEndToEndForLowNodeUtilization tests the low node utilization strategy.
|
||||
func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
|
||||
func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
|
||||
var thresholds = make(deschedulerapi.ResourceThresholds)
|
||||
var targetThresholds = make(deschedulerapi.ResourceThresholds)
|
||||
thresholds[v1.ResourceMemory] = 20
|
||||
@@ -108,16 +110,30 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
stopChannel := make(chan struct{})
|
||||
nodes, err := nodeutil.ReadyNodes(clientset, "", stopChannel)
|
||||
nodes, err := nodeutil.ReadyNodes(clientset, nodeInformer, "", stopChannel)
|
||||
if err != nil {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds}
|
||||
nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}
|
||||
lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams}
|
||||
ds := &options.DeschedulerServer{Client: clientset}
|
||||
nodePodCount := utils.InitializeNodePodCount(nodes)
|
||||
strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
|
||||
|
||||
lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: deschedulerapi.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
|
||||
Thresholds: thresholds,
|
||||
TargetThresholds: targetThresholds,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
clientset,
|
||||
evictionPolicyGroupVersion,
|
||||
false,
|
||||
0,
|
||||
nodes,
|
||||
)
|
||||
|
||||
strategies.LowNodeUtilization(clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
@@ -132,6 +148,14 @@ func TestE2E(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Error listing node with %v", err)
|
||||
}
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
|
||||
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
|
||||
|
||||
stopChannel := make(chan struct{}, 0)
|
||||
sharedInformerFactory.Start(stopChannel)
|
||||
sharedInformerFactory.WaitForCacheSync(stopChannel)
|
||||
defer close(stopChannel)
|
||||
|
||||
// Assumption: we should have a 3-node cluster by now. Kubeadm brings all the master components onto the master node.
// So, the last node would have the least utilization.
|
||||
rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
|
||||
@@ -139,7 +163,7 @@ func TestE2E(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Error creating deployment %v", err)
|
||||
}
|
||||
evictPods(t, clientSet, nodeList, rc)
|
||||
evictPods(t, clientSet, nodeInformer, nodeList, rc)
|
||||
|
||||
rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
|
||||
@@ -156,7 +180,7 @@ func TestE2E(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("Error creating deployment %v", err)
|
||||
}
|
||||
evictPods(t, clientSet, nodeList, rc)
|
||||
evictPods(t, clientSet, nodeInformer, nodeList, rc)
|
||||
}
|
||||
|
||||
func TestDeschedulingInterval(t *testing.T) {
|
||||
@@ -173,8 +197,13 @@ func TestDeschedulingInterval(t *testing.T) {
|
||||
|
||||
c := make(chan bool)
|
||||
go func() {
|
||||
err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy)
|
||||
if err != nil {
|
||||
evictionPolicyGroupVersion, err := eutils.SupportEviction(s.Client)
|
||||
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
||||
t.Errorf("Error when checking support for eviction: %v", err)
|
||||
}
|
||||
|
||||
stopChannel := make(chan struct{})
|
||||
if err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel); err != nil {
|
||||
t.Errorf("Error running descheduler strategies: %+v", err)
|
||||
}
|
||||
c <- true
|
||||
@@ -188,7 +217,7 @@ func TestDeschedulingInterval(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeList, rc *v1.ReplicationController) {
|
||||
func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) {
|
||||
var leastLoadedNode v1.Node
|
||||
podsBefore := math.MaxInt16
|
||||
for i := range nodeList.Items {
|
||||
@@ -208,7 +237,7 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeLis
|
||||
}
|
||||
}
|
||||
t.Log("Eviction of pods starting")
|
||||
startEndToEndForLowNodeUtilization(clientSet)
|
||||
startEndToEndForLowNodeUtilization(clientSet, nodeInformer)
|
||||
podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
|
||||
if err != nil {
|
||||
t.Errorf("Error listing pods on a node %v", err)
|
||||
|
||||
@@ -15,5 +15,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script just runs the e2e tests.
|
||||
if [ -n "$KIND_E2E" ]; then
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.17.5}
|
||||
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.8.1/kind-linux-amd64
|
||||
chmod +x kind-linux-amd64
|
||||
mv kind-linux-amd64 kind
|
||||
export PATH=$PATH:$PWD
|
||||
kind create cluster --image kindest/node:${K8S_VERSION} --config=./hack/kind_config.yaml
|
||||
export KUBECONFIG="$(kind get kubeconfig-path)"
|
||||
docker pull kubernetes/pause
|
||||
kind load docker-image kubernetes/pause
|
||||
kind get kubeconfig > /tmp/admin.conf
|
||||
export KUBECONFIG="/tmp/admin.conf"
|
||||
mkdir -p ~/gopath/src/sigs.k8s.io/
|
||||
mv ~/gopath/src/github.com/kubernetes-sigs/descheduler ~/gopath/src/sigs.k8s.io/.
|
||||
fi
|
||||
|
||||
PRJ_PREFIX="sigs.k8s.io/descheduler"
|
||||
go test ${PRJ_PREFIX}/test/e2e/ -v
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
// BuildTestPod creates a test pod with given parameters.
|
||||
func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
|
||||
func BuildTestPod(name string, cpu int64, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
@@ -50,7 +50,9 @@ func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod
|
||||
if memory >= 0 {
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
|
||||
}
|
||||
|
||||
if apply != nil {
|
||||
apply(pod)
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
@@ -85,7 +87,7 @@ func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
|
||||
}
|
||||
|
||||
// BuildTestNode creates a node with specified capacity.
|
||||
func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node {
|
||||
func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply func(*v1.Node)) *v1.Node {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
@@ -109,5 +111,8 @@ func BuildTestNode(name string, millicpu int64, mem int64, pods int64) *v1.Node
|
||||
},
|
||||
},
|
||||
}
|
||||
if apply != nil {
|
||||
apply(node)
|
||||
}
|
||||
return node
|
||||
}
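
With the new optional apply hooks, callers of the test package can customize fixtures at construction time instead of mutating them afterwards; a brief hypothetical sketch (the names and taint values are illustrative):

func buildFixtures() (*v1.Node, *v1.Pod) {
	// A node that carries a taint, set at construction time via the new hook.
	n := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
		node.Spec.Taints = []v1.Taint{{Key: "dedicated", Value: "infra", Effect: v1.TaintEffectNoSchedule}}
	})
	// A pod with labels and an owner reference, applied the same way instead of
	// mutating the returned object afterwards.
	p := test.BuildTestPod("p1", 100, 0, n.Name, func(pod *v1.Pod) {
		pod.Labels = map[string]string{"foo": "bar"}
		pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
	})
	return n, p
}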
|
||||
|
||||
2 vendor/github.com/gogo/protobuf/proto/encode.go generated vendored
@@ -189,6 +189,8 @@ type Marshaler interface {
|
||||
// prefixed by a varint-encoded length.
|
||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
||||
siz := Size(pb)
|
||||
sizVar := SizeVarint(uint64(siz))
|
||||
p.grow(siz + sizVar)
|
||||
p.EncodeVarint(uint64(siz))
|
||||
return p.Marshal(pb)
|
||||
}
|
||||
|
||||
18 vendor/github.com/gogo/protobuf/proto/lib.go generated vendored
@@ -948,13 +948,19 @@ func isProto3Zero(v reflect.Value) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const GoGoProtoPackageIsVersion2 = true
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
GoGoProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
const GoGoProtoPackageIsVersion1 = true
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
GoGoProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
GoGoProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
|
||||
71 vendor/github.com/gogo/protobuf/proto/properties.go generated vendored
@@ -43,7 +43,6 @@ package proto
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
@@ -205,7 +204,7 @@ func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -225,7 +224,7 @@ func (p *Properties) Parse(s string) {
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -400,6 +399,15 @@ func GetProperties(t reflect.Type) *StructProperties {
|
||||
return sprop
|
||||
}
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
@@ -441,37 +449,40 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
|
||||
if isOneofMessage {
|
||||
var oots []interface{}
|
||||
_, _, _, oots = om.XXX_OneofFuncs()
|
||||
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
17 vendor/github.com/gogo/protobuf/proto/table_marshal.go generated vendored
@@ -389,8 +389,13 @@ func (u *marshalInfo) computeMarshalInfo() {
|
||||
// get oneof implementers
|
||||
var oneofImplementers []interface{}
|
||||
// gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler
|
||||
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
if isOneofMessage {
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oneofImplementers = m.XXX_OneofWrappers()
|
||||
}
|
||||
}
|
||||
|
||||
// normal fields
|
||||
@@ -519,10 +524,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
|
||||
}
|
||||
}
|
||||
|
||||
type oneofMessage interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
|
||||
// wiretype returns the wire encoding of the type.
|
||||
func wiretype(encoding string) uint64 {
|
||||
switch encoding {
|
||||
@@ -2968,7 +2969,9 @@ func (p *Buffer) Marshal(pb Message) error {
|
||||
if m, ok := pb.(newMarshaler); ok {
|
||||
siz := m.XXX_Size()
|
||||
p.grow(siz) // make sure buf has enough capacity
|
||||
p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
|
||||
pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz]
|
||||
pp, err = m.XXX_Marshal(pp, p.deterministic)
|
||||
p.buf = append(p.buf, pp...)
|
||||
return err
|
||||
}
|
||||
if m, ok := pb.(Marshaler); ok {
|
||||
|
||||
19 vendor/github.com/gogo/protobuf/proto/table_merge.go generated vendored
@@ -530,6 +530,25 @@ func (mi *mergeInfo) computeMergeInfo() {
|
||||
}
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case isSlice && !isPointer: // E.g. []pb.T
|
||||
mergeInfo := getMergeInfo(tf)
|
||||
zero := reflect.Zero(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
// TODO: Make this faster?
|
||||
dstsp := dst.asPointerTo(f.Type)
|
||||
dsts := dstsp.Elem()
|
||||
srcs := src.asPointerTo(f.Type).Elem()
|
||||
for i := 0; i < srcs.Len(); i++ {
|
||||
dsts = reflect.Append(dsts, zero)
|
||||
srcElement := srcs.Index(i).Addr()
|
||||
dstElement := dsts.Index(dsts.Len() - 1).Addr()
|
||||
mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
|
||||
}
|
||||
if dsts.IsNil() {
|
||||
dsts = reflect.MakeSlice(f.Type, 0, 0)
|
||||
}
|
||||
dstsp.Elem().Set(dsts)
|
||||
}
|
||||
case !isPointer:
|
||||
mergeInfo := getMergeInfo(tf)
|
||||
mfi.merge = func(dst, src pointer) {
|
||||
|
||||
22 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go generated vendored
@@ -371,15 +371,18 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
}
|
||||
|
||||
// Find any types associated with oneof fields.
|
||||
// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
|
||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
|
||||
// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
|
||||
if fn.IsValid() && len(oneofFields) > 0 {
|
||||
res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
|
||||
for i := res.Len() - 1; i >= 0; i-- {
|
||||
v := res.Index(i) // interface{}
|
||||
tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
|
||||
typ := tptr.Elem() // Msg_X
|
||||
if len(oneofFields) > 0 {
|
||||
var oneofImplementers []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oneofImplementers = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oneofImplementers = m.XXX_OneofWrappers()
|
||||
}
|
||||
for _, v := range oneofImplementers {
|
||||
tptr := reflect.TypeOf(v) // *Msg_X
|
||||
typ := tptr.Elem() // Msg_X
|
||||
|
||||
f := typ.Field(0) // oneof implementers have one field
|
||||
baseUnmarshal := fieldUnmarshaler(&f)
|
||||
@@ -407,11 +410,12 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
|
||||
u.setTag(fieldNum, of.field, unmarshal, 0, name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Get extension ranges, if any.
|
||||
fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
|
||||
if fn.IsValid() {
|
||||
if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
|
||||
panic("a message with extensions, but no extensions field in " + t.Name())
|
||||
|
||||
6 vendor/github.com/gogo/protobuf/proto/text.go generated vendored
@@ -476,6 +476,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
|
||||
// writeAny writes an arbitrary field.
|
||||
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
|
||||
v = reflect.Indirect(v)
|
||||
@@ -589,8 +591,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
|
||||
// mutating this value.
|
||||
v = v.Addr()
|
||||
}
|
||||
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
|
||||
text, err := etm.MarshalText()
|
||||
if v.Type().Implements(textMarshalerType) {
|
||||
text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
17 vendor/golang.org/x/crypto/ssh/terminal/terminal.go generated vendored
@@ -7,6 +7,7 @@ package terminal
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
@@ -939,6 +940,8 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
|
||||
// readPasswordLine reads from reader until it finds \n or io.EOF.
|
||||
// The slice returned does not include the \n.
|
||||
// readPasswordLine also ignores any \r it finds.
|
||||
// Windows uses \r as end of line. So, on Windows, readPasswordLine
|
||||
// reads until it finds \r and ignores any \n it finds during processing.
|
||||
func readPasswordLine(reader io.Reader) ([]byte, error) {
|
||||
var buf [1]byte
|
||||
var ret []byte
|
||||
@@ -947,10 +950,20 @@ func readPasswordLine(reader io.Reader) ([]byte, error) {
|
||||
n, err := reader.Read(buf[:])
|
||||
if n > 0 {
|
||||
switch buf[0] {
|
||||
case '\b':
|
||||
if len(ret) > 0 {
|
||||
ret = ret[:len(ret)-1]
|
||||
}
|
||||
case '\n':
|
||||
return ret, nil
|
||||
if runtime.GOOS != "windows" {
|
||||
return ret, nil
|
||||
}
|
||||
// otherwise ignore \n
|
||||
case '\r':
|
||||
// remove \r from passwords on Windows
|
||||
if runtime.GOOS == "windows" {
|
||||
return ret, nil
|
||||
}
|
||||
// otherwise ignore \r
|
||||
default:
|
||||
ret = append(ret, buf[0])
|
||||
}
|
||||
|
||||
4 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go generated vendored
@@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) {
|
||||
}
|
||||
old := st
|
||||
|
||||
st &^= (windows.ENABLE_ECHO_INPUT)
|
||||
st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
|
||||
st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT)
|
||||
st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT)
|
||||
if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
18 vendor/gopkg.in/yaml.v2/.travis.yml generated vendored
@@ -1,12 +1,16 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5
|
||||
- 1.6
|
||||
- 1.7
|
||||
- 1.8
|
||||
- 1.9
|
||||
- tip
|
||||
- "1.4.x"
|
||||
- "1.5.x"
|
||||
- "1.6.x"
|
||||
- "1.7.x"
|
||||
- "1.8.x"
|
||||
- "1.9.x"
|
||||
- "1.10.x"
|
||||
- "1.11.x"
|
||||
- "1.12.x"
|
||||
- "1.13.x"
|
||||
- "tip"
|
||||
|
||||
go_import_path: gopkg.in/yaml.v2
|
||||
|
||||
14 vendor/gopkg.in/yaml.v2/decode.go generated vendored
@@ -319,10 +319,14 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
|
||||
}
|
||||
|
||||
const (
|
||||
// 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion
|
||||
// 400,000 decode operations is ~500kb of dense object declarations, or
|
||||
// ~5kb of dense object declarations with 10000% alias expansion
|
||||
alias_ratio_range_low = 400000
|
||||
// 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion
|
||||
|
||||
// 4,000,000 decode operations is ~5MB of dense object declarations, or
|
||||
// ~4.5MB of dense object declarations with 10% alias expansion
|
||||
alias_ratio_range_high = 4000000
|
||||
|
||||
// alias_ratio_range is the range over which we scale allowed alias ratios
|
||||
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
|
||||
)
|
||||
@@ -784,8 +788,7 @@ func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
case mappingNode:
|
||||
d.unmarshal(n, out)
|
||||
case aliasNode:
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
if n.alias != nil && n.alias.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(n, out)
|
||||
@@ -794,8 +797,7 @@ func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
for i := len(n.children) - 1; i >= 0; i-- {
|
||||
ni := n.children[i]
|
||||
if ni.kind == aliasNode {
|
||||
an, ok := d.doc.anchors[ni.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
if ni.alias != nil && ni.alias.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
} else if ni.kind != mappingNode {
|
||||
|
||||
109 vendor/gopkg.in/yaml.v2/scannerc.go generated vendored
@@ -626,30 +626,17 @@ func trace(args ...interface{}) func() {
|
||||
func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
|
||||
// While we need more tokens to fetch, do it.
|
||||
for {
|
||||
// Check if we really need to fetch more tokens.
|
||||
need_more_tokens := false
|
||||
|
||||
if parser.tokens_head == len(parser.tokens) {
|
||||
// Queue is empty.
|
||||
need_more_tokens = true
|
||||
} else {
|
||||
// Check if any potential simple key may occupy the head position.
|
||||
if !yaml_parser_stale_simple_keys(parser) {
|
||||
if parser.tokens_head != len(parser.tokens) {
|
||||
// If queue is non-empty, check if any potential simple key may
|
||||
// occupy the head position.
|
||||
head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
|
||||
if !ok {
|
||||
break
|
||||
} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
|
||||
return false
|
||||
} else if !valid {
|
||||
break
|
||||
}
|
||||
|
||||
for i := range parser.simple_keys {
|
||||
simple_key := &parser.simple_keys[i]
|
||||
if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
|
||||
need_more_tokens = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We are finished.
|
||||
if !need_more_tokens {
|
||||
break
|
||||
}
|
||||
// Fetch the next token.
|
||||
if !yaml_parser_fetch_next_token(parser) {
|
||||
@@ -678,11 +665,6 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Remove obsolete potential simple keys.
|
||||
if !yaml_parser_stale_simple_keys(parser) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check the indentation level against the current column.
|
||||
if !yaml_parser_unroll_indent(parser, parser.mark.column) {
|
||||
return false
|
||||
@@ -837,29 +819,30 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
|
||||
"found character that cannot start any token")
|
||||
}
|
||||
|
||||
// Check the list of potential simple keys and remove the positions that
|
||||
// cannot contain simple keys anymore.
|
||||
func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
|
||||
// Check for a potential simple key for each flow level.
|
||||
for i := range parser.simple_keys {
|
||||
simple_key := &parser.simple_keys[i]
|
||||
|
||||
// The specification requires that a simple key
|
||||
//
|
||||
// - is limited to a single line,
|
||||
// - is shorter than 1024 characters.
|
||||
if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
|
||||
|
||||
// Check if the potential simple key to be removed is required.
|
||||
if simple_key.required {
|
||||
return yaml_parser_set_scanner_error(parser,
|
||||
"while scanning a simple key", simple_key.mark,
|
||||
"could not find expected ':'")
|
||||
}
|
||||
simple_key.possible = false
|
||||
}
|
||||
func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
	if !simple_key.possible {
		return false, true
	}
	return true

	// The 1.2 specification says:
	//
	// "If the ? indicator is omitted, parsing needs to see past the
	// implicit key to recognize it as such. To limit the amount of
	// lookahead required, the “:” indicator must appear at most 1024
	// Unicode characters beyond the start of the key. In addition, the key
	// is restricted to a single line."
	//
	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
		// Check if the potential simple key to be removed is required.
		if simple_key.required {
			return false, yaml_parser_set_scanner_error(parser,
				"while scanning a simple key", simple_key.mark,
				"could not find expected ':'")
		}
		simple_key.possible = false
		return false, true
	}
	return true, true
}

// Check if a simple key may start at the current position and add it if
@@ -879,13 +862,14 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
			possible:     true,
			required:     required,
			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
			mark:         parser.mark,
		}
		simple_key.mark = parser.mark

		if !yaml_parser_remove_simple_key(parser) {
			return false
		}
		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
	}
	return true
}
@@ -900,9 +884,10 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
				"while scanning a simple key", parser.simple_keys[i].mark,
				"could not find expected ':'")
		}
		// Remove the key from the stack.
		parser.simple_keys[i].possible = false
		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
	}
	// Remove the key from the stack.
	parser.simple_keys[i].possible = false
	return true
}
@@ -912,7 +897,12 @@ const max_flow_level = 10000
// Increase the flow level and resize the simple key list if needed.
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
	// Reset the simple key on the next level.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
		possible:     false,
		required:     false,
		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
		mark:         parser.mark,
	})

	// Increase the flow level.
	parser.flow_level++
@@ -928,7 +918,9 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
	if parser.flow_level > 0 {
		parser.flow_level--
		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
		last := len(parser.simple_keys) - 1
		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
		parser.simple_keys = parser.simple_keys[:last]
	}
	return true
}
@@ -1005,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
	// Initialize the simple key stack.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})

	parser.simple_keys_by_tok = make(map[int]int)

	// A simple key is allowed at the beginning of the stream.
	parser.simple_key_allowed = true

@@ -1286,7 +1280,11 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]

	// Have we found a simple key?
	if simple_key.possible {
	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
		return false

	} else if valid {

		// Create the KEY token and insert it into the queue.
		token := yaml_token_t{
			typ: yaml_KEY_TOKEN,
@@ -1304,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {

		// Remove the simple key.
		simple_key.possible = false
		delete(parser.simple_keys_by_tok, simple_key.token_number)

		// A simple key cannot follow another simple key.
		parser.simple_key_allowed = false

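The hunks above come from the gopkg.in/yaml.v2 scanner: a pending "simple key" stays valid only within the limits quoted from the YAML 1.2 specification (the ':' must appear within 1024 Unicode characters of the key start, on the same line), and the new simple_keys_by_tok map lets the scanner drop stale keys by token number. A minimal standalone sketch of that staleness rule follows; the mark type and helper name are illustrative stand-ins, not the library's API.

package main

import "fmt"

// mark mirrors the idea of a parser position (absolute index plus line);
// it is an illustrative stand-in for yaml.v2's internal yaml_mark_t.
type mark struct {
	index int
	line  int
}

// simpleKeyStillValid applies the YAML 1.2 limits described above: a pending
// key is abandoned once the current position has moved to a later line or
// more than 1024 characters past the key's start.
func simpleKeyStillValid(keyStart, current mark) bool {
	if keyStart.line < current.line {
		return false // the key is restricted to a single line
	}
	if keyStart.index+1024 < current.index {
		return false // ':' must appear within 1024 characters of the key
	}
	return true
}

func main() {
	start := mark{index: 10, line: 3}
	fmt.Println(simpleKeyStillValid(start, mark{index: 500, line: 3}))  // true
	fmt.Println(simpleKeyStillValid(start, mark{index: 2000, line: 3})) // false: too far
	fmt.Println(simpleKeyStillValid(start, mark{index: 20, line: 4}))   // false: next line
}
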
2
vendor/gopkg.in/yaml.v2/yaml.go
generated
vendored
@@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) {
	return unmarshal(in, out, true)
}

// A Decorder reads and decodes YAML values from an input stream.
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
	strict bool
	parser *parser

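The corrected comment describes yaml.v2's Decoder, which reads a stream of YAML documents from an io.Reader. A small hedged usage sketch of that public API (the document contents here are made up):

package main

import (
	"fmt"
	"io"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Two YAML documents in one stream, separated by "---".
	in := "name: descheduler\n---\nname: scheduler\n"

	dec := yaml.NewDecoder(strings.NewReader(in))
	dec.SetStrict(true) // error on unknown fields, like UnmarshalStrict

	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		if err := dec.Decode(&doc); err == io.EOF {
			break // end of stream
		} else if err != nil {
			panic(err)
		}
		fmt.Println(doc.Name)
	}
}
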
1
vendor/gopkg.in/yaml.v2/yamlh.go
generated
vendored
@@ -579,6 +579,7 @@ type yaml_parser_t struct {

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.
	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number

	// Parser stuff

6
vendor/k8s.io/apimachinery/pkg/util/net/http.go
generated
vendored
@@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string {
	return result
}

// IsTimeout returns true if the given error is a network timeout error
func IsTimeout(err error) bool {
	neterr, ok := err.(net.Error)
	return ok && neterr != nil && neterr.Timeout()
}

// IsProbableEOF returns true if the given error resembles a connection termination
// scenario that would justify assuming that the watch is empty.
// These errors are what the Go http stack returns back to us which are general

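The new IsTimeout helper only checks whether an error implements net.Error and reports Timeout(). A caller might use it to decide whether a failed request is worth retrying; this is an illustrative pattern, not code from the repository (the URL and timeout are arbitrary):

package main

import (
	"fmt"
	"net/http"
	"time"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// fetchOnce issues a single GET with a deliberately tight client timeout.
func fetchOnce(url string) error {
	client := &http.Client{Timeout: 50 * time.Millisecond}
	resp, err := client.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}

func main() {
	err := fetchOnce("https://example.com")
	switch {
	case err == nil:
		fmt.Println("request succeeded")
	case utilnet.IsTimeout(err):
		fmt.Println("timed out; safe to retry:", err)
	default:
		fmt.Println("non-timeout error:", err)
	}
}
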
2
vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
generated
vendored
@@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() {
		case io.ErrUnexpectedEOF:
			klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
		default:
			if net.IsProbableEOF(err) {
			if net.IsProbableEOF(err) || net.IsTimeout(err) {
				klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
			} else {
				sw.result <- Event{

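The change above makes the watch decoder treat network timeouts like probable EOFs: both end the watch quietly instead of surfacing an ERROR event. A small hedged sketch of that classification, written from the consumer side (illustrative only, not client-go code):

package main

import (
	"errors"
	"fmt"
	"io"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// classifyWatchError mirrors the decision made in StreamWatcher.receive:
// EOF-like errors and timeouts simply end the watch, anything else is
// reported back to the consumer.
func classifyWatchError(err error) string {
	switch {
	case errors.Is(err, io.EOF):
		return "clean end of stream"
	case errors.Is(err, io.ErrUnexpectedEOF):
		return "unexpected EOF, watch ends"
	case utilnet.IsProbableEOF(err) || utilnet.IsTimeout(err):
		return "benign termination, watch ends quietly"
	default:
		return "decode error, reported as an ERROR event"
	}
}

func main() {
	fmt.Println(classifyWatchError(io.EOF))
	fmt.Println(classifyWatchError(errors.New("yaml: control characters are not allowed")))
}
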
7
vendor/k8s.io/client-go/discovery/discovery_client.go
generated
vendored
@@ -463,6 +463,13 @@ func setDiscoveryDefaults(config *restclient.Config) error {
	if config.Timeout == 0 {
		config.Timeout = defaultTimeout
	}
	if config.Burst == 0 && config.QPS < 100 {
		// discovery is expected to be bursty, increase the default burst
		// to accommodate looking up resource info for many API groups.
		// matches burst set by ConfigFlags#ToDiscoveryClient().
		// see https://issue.k8s.io/86149
		config.Burst = 100
	}
	codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()}
	config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
	if len(config.UserAgent) == 0 {

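The discovery hunk raises the default client burst to 100 because discovery fans out roughly one request per API group. Per the condition above, callers that set their own rate limits keep them; a minimal hedged sketch (assumes an in-cluster config, and the specific QPS/Burst numbers are arbitrary):

package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// Explicit values win over setDiscoveryDefaults: with Burst already
	// non-zero, the "config.Burst = 100" defaulting above is skipped.
	cfg.QPS = 50
	cfg.Burst = 200

	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	groups, err := dc.ServerGroups()
	if err != nil {
		panic(err)
	}
	fmt.Println("API groups discovered:", len(groups.Groups))
}
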
54
vendor/k8s.io/client-go/informers/admissionregistration/interface.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package admissionregistration

import (
	v1 "k8s.io/client-go/informers/admissionregistration/v1"
	v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to each of this group's versions.
type Interface interface {
	// V1 provides access to shared informers for resources in V1.
	V1() v1.Interface
	// V1beta1 provides access to shared informers for resources in V1beta1.
	V1beta1() v1beta1.Interface
}

type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
	return v1.New(g.factory, g.namespace, g.tweakListOptions)
}

// V1beta1 returns a new v1beta1.Interface.
func (g *group) V1beta1() v1beta1.Interface {
	return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}

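The newly vendored informer packages are normally reached through a shared informer factory rather than constructed directly, as their generated comments advise. A hedged usage sketch for the admissionregistration v1 informers added in this diff (assumes an in-cluster config and that the vendored client-go exposes the V1 group shown above; kubeconfig handling is omitted):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// One factory shares caches and connections across all informers.
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)

	webhooks := factory.Admissionregistration().V1().MutatingWebhookConfigurations()
	webhooks.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fmt.Println("mutating webhook configuration added")
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The lister serves reads from the shared in-memory cache.
	configs, err := webhooks.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached configurations:", len(configs))
}
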
52
vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go
generated
vendored
Normal file
52
vendor/k8s.io/client-go/informers/admissionregistration/v1/interface.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to all the informers in this group version.
|
||||
type Interface interface {
|
||||
// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
|
||||
MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer
|
||||
// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
|
||||
ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer
|
||||
}
|
||||
|
||||
type version struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// New returns a new Interface.
|
||||
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||
}
|
||||
|
||||
// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
|
||||
func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer {
|
||||
return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
|
||||
func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer {
|
||||
return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
88
vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
generated
vendored
Normal file
88
vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/client-go/listers/admissionregistration/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for
|
||||
// MutatingWebhookConfigurations.
|
||||
type MutatingWebhookConfigurationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.MutatingWebhookConfigurationLister
|
||||
}
|
||||
|
||||
type mutatingWebhookConfigurationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(options)
|
||||
},
|
||||
},
|
||||
&admissionregistrationv1.MutatingWebhookConfiguration{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&admissionregistrationv1.MutatingWebhookConfiguration{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *mutatingWebhookConfigurationInformer) Lister() v1.MutatingWebhookConfigurationLister {
|
||||
return v1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer())
|
||||
}
|
||||
88
vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
generated
vendored
Normal file
88
vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/client-go/listers/admissionregistration/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for
|
||||
// ValidatingWebhookConfigurations.
|
||||
type ValidatingWebhookConfigurationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.ValidatingWebhookConfigurationLister
|
||||
}
|
||||
|
||||
type validatingWebhookConfigurationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(options)
|
||||
},
|
||||
},
|
||||
&admissionregistrationv1.ValidatingWebhookConfiguration{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&admissionregistrationv1.ValidatingWebhookConfiguration{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *validatingWebhookConfigurationInformer) Lister() v1.ValidatingWebhookConfigurationLister {
|
||||
return v1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer())
|
||||
}
|
||||
52
vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
generated
vendored
Normal file
52
vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to all the informers in this group version.
|
||||
type Interface interface {
|
||||
// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
|
||||
MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer
|
||||
// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
|
||||
ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer
|
||||
}
|
||||
|
||||
type version struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// New returns a new Interface.
|
||||
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||
}
|
||||
|
||||
// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer.
|
||||
func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer {
|
||||
return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer.
|
||||
func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer {
|
||||
return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
88
vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
generated
vendored
Normal file
88
vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for
|
||||
// MutatingWebhookConfigurations.
|
||||
type MutatingWebhookConfigurationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1beta1.MutatingWebhookConfigurationLister
|
||||
}
|
||||
|
||||
type mutatingWebhookConfigurationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(options)
|
||||
},
|
||||
},
|
||||
&admissionregistrationv1beta1.MutatingWebhookConfiguration{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&admissionregistrationv1beta1.MutatingWebhookConfiguration{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister {
|
||||
return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer())
|
||||
}
|
||||
88
vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
generated
vendored
Normal file
88
vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for
|
||||
// ValidatingWebhookConfigurations.
|
||||
type ValidatingWebhookConfigurationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1beta1.ValidatingWebhookConfigurationLister
|
||||
}
|
||||
|
||||
type validatingWebhookConfigurationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(options)
|
||||
},
|
||||
},
|
||||
&admissionregistrationv1beta1.ValidatingWebhookConfiguration{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&admissionregistrationv1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister {
|
||||
return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer())
|
||||
}
|
||||
62
vendor/k8s.io/client-go/informers/apps/interface.go
generated
vendored
Normal file
62
vendor/k8s.io/client-go/informers/apps/interface.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
v1 "k8s.io/client-go/informers/apps/v1"
|
||||
v1beta1 "k8s.io/client-go/informers/apps/v1beta1"
|
||||
v1beta2 "k8s.io/client-go/informers/apps/v1beta2"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to each of this group's versions.
|
||||
type Interface interface {
|
||||
// V1 provides access to shared informers for resources in V1.
|
||||
V1() v1.Interface
|
||||
// V1beta1 provides access to shared informers for resources in V1beta1.
|
||||
V1beta1() v1beta1.Interface
|
||||
// V1beta2 provides access to shared informers for resources in V1beta2.
|
||||
V1beta2() v1beta2.Interface
|
||||
}
|
||||
|
||||
type group struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// New returns a new Interface.
|
||||
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||
}
|
||||
|
||||
// V1 returns a new v1.Interface.
|
||||
func (g *group) V1() v1.Interface {
|
||||
return v1.New(g.factory, g.namespace, g.tweakListOptions)
|
||||
}
|
||||
|
||||
// V1beta1 returns a new v1beta1.Interface.
|
||||
func (g *group) V1beta1() v1beta1.Interface {
|
||||
return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
|
||||
}
|
||||
|
||||
// V1beta2 returns a new v1beta2.Interface.
|
||||
func (g *group) V1beta2() v1beta2.Interface {
|
||||
return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
|
||||
}
|
||||
89
vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
generated
vendored
Normal file
89
vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/client-go/listers/apps/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ControllerRevisionInformer provides access to a shared informer and lister for
|
||||
// ControllerRevisions.
|
||||
type ControllerRevisionInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.ControllerRevisionLister
|
||||
}
|
||||
|
||||
type controllerRevisionInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewControllerRevisionInformer constructs a new informer for ControllerRevision type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().ControllerRevisions(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().ControllerRevisions(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&appsv1.ControllerRevision{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&appsv1.ControllerRevision{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister {
|
||||
return v1.NewControllerRevisionLister(f.Informer().GetIndexer())
|
||||
}
|
||||
89
vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
generated
vendored
Normal file
89
vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/client-go/listers/apps/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// DaemonSetInformer provides access to a shared informer and lister for
|
||||
// DaemonSets.
|
||||
type DaemonSetInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.DaemonSetLister
|
||||
}
|
||||
|
||||
type daemonSetInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewDaemonSetInformer constructs a new informer for DaemonSet type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().DaemonSets(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().DaemonSets(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&appsv1.DaemonSet{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&appsv1.DaemonSet{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *daemonSetInformer) Lister() v1.DaemonSetLister {
|
||||
return v1.NewDaemonSetLister(f.Informer().GetIndexer())
|
||||
}
|
||||
89
vendor/k8s.io/client-go/informers/apps/v1/deployment.go
generated
vendored
Normal file
89
vendor/k8s.io/client-go/informers/apps/v1/deployment.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/client-go/listers/apps/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// DeploymentInformer provides access to a shared informer and lister for
|
||||
// Deployments.
|
||||
type DeploymentInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.DeploymentLister
|
||||
}
|
||||
|
||||
type deploymentInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewDeploymentInformer constructs a new informer for Deployment type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().Deployments(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().Deployments(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&appsv1.Deployment{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&appsv1.Deployment{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *deploymentInformer) Lister() v1.DeploymentLister {
|
||||
return v1.NewDeploymentLister(f.Informer().GetIndexer())
|
||||
}
|
||||
73
vendor/k8s.io/client-go/informers/apps/v1/interface.go
generated
vendored
Normal file
73
vendor/k8s.io/client-go/informers/apps/v1/interface.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to all the informers in this group version.
|
||||
type Interface interface {
|
||||
// ControllerRevisions returns a ControllerRevisionInformer.
|
||||
ControllerRevisions() ControllerRevisionInformer
|
||||
// DaemonSets returns a DaemonSetInformer.
|
||||
DaemonSets() DaemonSetInformer
|
||||
// Deployments returns a DeploymentInformer.
|
||||
Deployments() DeploymentInformer
|
||||
// ReplicaSets returns a ReplicaSetInformer.
|
||||
ReplicaSets() ReplicaSetInformer
|
||||
// StatefulSets returns a StatefulSetInformer.
|
||||
StatefulSets() StatefulSetInformer
|
||||
}
|
||||
|
||||
type version struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
namespace string
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// New returns a new Interface.
|
||||
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
|
||||
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
|
||||
}
|
||||
|
||||
// ControllerRevisions returns a ControllerRevisionInformer.
|
||||
func (v *version) ControllerRevisions() ControllerRevisionInformer {
|
||||
return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// DaemonSets returns a DaemonSetInformer.
|
||||
func (v *version) DaemonSets() DaemonSetInformer {
|
||||
return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// Deployments returns a DeploymentInformer.
|
||||
func (v *version) Deployments() DeploymentInformer {
|
||||
return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// ReplicaSets returns a ReplicaSetInformer.
|
||||
func (v *version) ReplicaSets() ReplicaSetInformer {
|
||||
return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// StatefulSets returns a StatefulSetInformer.
|
||||
func (v *version) StatefulSets() StatefulSetInformer {
|
||||
return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
89
vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
generated
vendored
Normal file
89
vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1 "k8s.io/client-go/listers/apps/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ReplicaSetInformer provides access to a shared informer and lister for
|
||||
// ReplicaSets.
|
||||
type ReplicaSetInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.ReplicaSetLister
|
||||
}
|
||||
|
||||
type replicaSetInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewReplicaSetInformer constructs a new informer for ReplicaSet type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().ReplicaSets(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AppsV1().ReplicaSets(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&appsv1.ReplicaSet{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&appsv1.ReplicaSet{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *replicaSetInformer) Lister() v1.ReplicaSetLister {
|
||||
return v1.NewReplicaSetLister(f.Informer().GetIndexer())
|
||||
}
|
||||

89 vendor/k8s.io/client-go/informers/apps/v1/statefulset.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1

import (
	time "time"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1 "k8s.io/client-go/listers/apps/v1"
	cache "k8s.io/client-go/tools/cache"
)

// StatefulSetInformer provides access to a shared informer and lister for
// StatefulSets.
type StatefulSetInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1.StatefulSetLister
}

type statefulSetInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewStatefulSetInformer constructs a new informer for StatefulSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1().StatefulSets(namespace).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1().StatefulSets(namespace).Watch(options)
			},
		},
		&appsv1.StatefulSet{},
		resyncPeriod,
		indexers,
	)
}

func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *statefulSetInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1.StatefulSet{}, f.defaultInformer)
}

func (f *statefulSetInformer) Lister() v1.StatefulSetLister {
	return v1.NewStatefulSetLister(f.Informer().GetIndexer())
}
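
For orientation (an illustrative sketch, not part of the vendored diff): the generated comments above recommend obtaining these informers through a shared factory rather than constructing them independently. A minimal, hypothetical Go example of that pattern, assuming an already-built clientset, could look like this:

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// watchStatefulSets lists StatefulSets from the shared informer cache.
// The factory hands out shared informers, so repeated calls reuse one watch.
func watchStatefulSets(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(clientset, 30*time.Second)
	// Asking for the Lister registers the StatefulSet informer with the factory.
	lister := factory.Apps().V1().StatefulSets().Lister()

	factory.Start(stopCh)            // start all registered informers
	factory.WaitForCacheSync(stopCh) // block until the initial List/Watch has synced

	sets, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("observed %d StatefulSets\n", len(sets))
	return nil
}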

89 vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta1

import (
	time "time"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta1 "k8s.io/client-go/listers/apps/v1beta1"
	cache "k8s.io/client-go/tools/cache"
)

// ControllerRevisionInformer provides access to a shared informer and lister for
// ControllerRevisions.
type ControllerRevisionInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta1.ControllerRevisionLister
}

type controllerRevisionInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewControllerRevisionInformer constructs a new informer for ControllerRevision type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta1().ControllerRevisions(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta1().ControllerRevisions(namespace).Watch(options)
			},
		},
		&appsv1beta1.ControllerRevision{},
		resyncPeriod,
		indexers,
	)
}

func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta1.ControllerRevision{}, f.defaultInformer)
}

func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister {
	return v1beta1.NewControllerRevisionLister(f.Informer().GetIndexer())
}

89 vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta1

import (
	time "time"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta1 "k8s.io/client-go/listers/apps/v1beta1"
	cache "k8s.io/client-go/tools/cache"
)

// DeploymentInformer provides access to a shared informer and lister for
// Deployments.
type DeploymentInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta1.DeploymentLister
}

type deploymentInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewDeploymentInformer constructs a new informer for Deployment type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta1().Deployments(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta1().Deployments(namespace).Watch(options)
			},
		},
		&appsv1beta1.Deployment{},
		resyncPeriod,
		indexers,
	)
}

func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta1.Deployment{}, f.defaultInformer)
}

func (f *deploymentInformer) Lister() v1beta1.DeploymentLister {
	return v1beta1.NewDeploymentLister(f.Informer().GetIndexer())
}

59 vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go generated vendored Normal file
@@ -0,0 +1,59 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta1

import (
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// ControllerRevisions returns a ControllerRevisionInformer.
	ControllerRevisions() ControllerRevisionInformer
	// Deployments returns a DeploymentInformer.
	Deployments() DeploymentInformer
	// StatefulSets returns a StatefulSetInformer.
	StatefulSets() StatefulSetInformer
}

type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// ControllerRevisions returns a ControllerRevisionInformer.
func (v *version) ControllerRevisions() ControllerRevisionInformer {
	return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// Deployments returns a DeploymentInformer.
func (v *version) Deployments() DeploymentInformer {
	return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// StatefulSets returns a StatefulSetInformer.
func (v *version) StatefulSets() StatefulSetInformer {
	return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

89 vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta1

import (
	time "time"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta1 "k8s.io/client-go/listers/apps/v1beta1"
	cache "k8s.io/client-go/tools/cache"
)

// StatefulSetInformer provides access to a shared informer and lister for
// StatefulSets.
type StatefulSetInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta1.StatefulSetLister
}

type statefulSetInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewStatefulSetInformer constructs a new informer for StatefulSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta1().StatefulSets(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta1().StatefulSets(namespace).Watch(options)
			},
		},
		&appsv1beta1.StatefulSet{},
		resyncPeriod,
		indexers,
	)
}

func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *statefulSetInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta1.StatefulSet{}, f.defaultInformer)
}

func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister {
	return v1beta1.NewStatefulSetLister(f.Informer().GetIndexer())
}

89 vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta2

import (
	time "time"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
	cache "k8s.io/client-go/tools/cache"
)

// ControllerRevisionInformer provides access to a shared informer and lister for
// ControllerRevisions.
type ControllerRevisionInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta2.ControllerRevisionLister
}

type controllerRevisionInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewControllerRevisionInformer constructs a new informer for ControllerRevision type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().ControllerRevisions(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().ControllerRevisions(namespace).Watch(options)
			},
		},
		&appsv1beta2.ControllerRevision{},
		resyncPeriod,
		indexers,
	)
}

func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta2.ControllerRevision{}, f.defaultInformer)
}

func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister {
	return v1beta2.NewControllerRevisionLister(f.Informer().GetIndexer())
}

89 vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta2

import (
	time "time"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
	cache "k8s.io/client-go/tools/cache"
)

// DaemonSetInformer provides access to a shared informer and lister for
// DaemonSets.
type DaemonSetInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta2.DaemonSetLister
}

type daemonSetInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewDaemonSetInformer constructs a new informer for DaemonSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().DaemonSets(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().DaemonSets(namespace).Watch(options)
			},
		},
		&appsv1beta2.DaemonSet{},
		resyncPeriod,
		indexers,
	)
}

func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta2.DaemonSet{}, f.defaultInformer)
}

func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister {
	return v1beta2.NewDaemonSetLister(f.Informer().GetIndexer())
}

89 vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta2

import (
	time "time"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
	cache "k8s.io/client-go/tools/cache"
)

// DeploymentInformer provides access to a shared informer and lister for
// Deployments.
type DeploymentInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta2.DeploymentLister
}

type deploymentInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewDeploymentInformer constructs a new informer for Deployment type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredDeploymentInformer constructs a new informer for Deployment type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().Deployments(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().Deployments(namespace).Watch(options)
			},
		},
		&appsv1beta2.Deployment{},
		resyncPeriod,
		indexers,
	)
}

func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta2.Deployment{}, f.defaultInformer)
}

func (f *deploymentInformer) Lister() v1beta2.DeploymentLister {
	return v1beta2.NewDeploymentLister(f.Informer().GetIndexer())
}

73 vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go generated vendored Normal file
@@ -0,0 +1,73 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta2

import (
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// ControllerRevisions returns a ControllerRevisionInformer.
	ControllerRevisions() ControllerRevisionInformer
	// DaemonSets returns a DaemonSetInformer.
	DaemonSets() DaemonSetInformer
	// Deployments returns a DeploymentInformer.
	Deployments() DeploymentInformer
	// ReplicaSets returns a ReplicaSetInformer.
	ReplicaSets() ReplicaSetInformer
	// StatefulSets returns a StatefulSetInformer.
	StatefulSets() StatefulSetInformer
}

type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// ControllerRevisions returns a ControllerRevisionInformer.
func (v *version) ControllerRevisions() ControllerRevisionInformer {
	return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// DaemonSets returns a DaemonSetInformer.
func (v *version) DaemonSets() DaemonSetInformer {
	return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// Deployments returns a DeploymentInformer.
func (v *version) Deployments() DeploymentInformer {
	return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// ReplicaSets returns a ReplicaSetInformer.
func (v *version) ReplicaSets() ReplicaSetInformer {
	return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// StatefulSets returns a StatefulSetInformer.
func (v *version) StatefulSets() StatefulSetInformer {
	return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

89 vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta2

import (
	time "time"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
	cache "k8s.io/client-go/tools/cache"
)

// ReplicaSetInformer provides access to a shared informer and lister for
// ReplicaSets.
type ReplicaSetInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta2.ReplicaSetLister
}

type replicaSetInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewReplicaSetInformer constructs a new informer for ReplicaSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().ReplicaSets(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().ReplicaSets(namespace).Watch(options)
			},
		},
		&appsv1beta2.ReplicaSet{},
		resyncPeriod,
		indexers,
	)
}

func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta2.ReplicaSet{}, f.defaultInformer)
}

func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister {
	return v1beta2.NewReplicaSetLister(f.Informer().GetIndexer())
}

89 vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go generated vendored Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1beta2

import (
	time "time"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1beta2 "k8s.io/client-go/listers/apps/v1beta2"
	cache "k8s.io/client-go/tools/cache"
)

// StatefulSetInformer provides access to a shared informer and lister for
// StatefulSets.
type StatefulSetInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1beta2.StatefulSetLister
}

type statefulSetInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewStatefulSetInformer constructs a new informer for StatefulSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().StatefulSets(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AppsV1beta2().StatefulSets(namespace).Watch(options)
			},
		},
		&appsv1beta2.StatefulSet{},
		resyncPeriod,
		indexers,
	)
}

func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *statefulSetInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&appsv1beta2.StatefulSet{}, f.defaultInformer)
}

func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister {
	return v1beta2.NewStatefulSetLister(f.Informer().GetIndexer())
}

46 vendor/k8s.io/client-go/informers/auditregistration/interface.go generated vendored Normal file
@@ -0,0 +1,46 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package auditregistration

import (
	v1alpha1 "k8s.io/client-go/informers/auditregistration/v1alpha1"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to each of this group's versions.
type Interface interface {
	// V1alpha1 provides access to shared informers for resources in V1alpha1.
	V1alpha1() v1alpha1.Interface
}

type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go
generated
vendored
Normal file
88
vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/auditsink.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1alpha1 "k8s.io/client-go/listers/auditregistration/v1alpha1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// AuditSinkInformer provides access to a shared informer and lister for
|
||||
// AuditSinks.
|
||||
type AuditSinkInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1alpha1.AuditSinkLister
|
||||
}
|
||||
|
||||
type auditSinkInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// NewAuditSinkInformer constructs a new informer for AuditSink type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredAuditSinkInformer(client, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredAuditSinkInformer constructs a new informer for AuditSink type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredAuditSinkInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AuditregistrationV1alpha1().AuditSinks().List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.AuditregistrationV1alpha1().AuditSinks().Watch(options)
|
||||
},
|
||||
},
|
||||
&auditregistrationv1alpha1.AuditSink{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *auditSinkInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredAuditSinkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *auditSinkInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&auditregistrationv1alpha1.AuditSink{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *auditSinkInformer) Lister() v1alpha1.AuditSinkLister {
|
||||
return v1alpha1.NewAuditSinkLister(f.Informer().GetIndexer())
|
||||
}
|
||||
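
Worth noting (again an illustrative sketch, not part of the vendored diff): cluster-scoped resources such as AuditSink are generated without the namespace parameter and without the namespace struct field, so a standalone constructor call takes only the client, resync period, and indexers:

package example

import (
	"time"

	auditinformers "k8s.io/client-go/informers/auditregistration/v1alpha1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newAuditSinkInformer builds an independent (non-factory) informer; the generated
// comments above still recommend the shared-factory route when one is available.
func newAuditSinkInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
	return auditinformers.NewAuditSinkInformer(clientset, 30*time.Second, cache.Indexers{})
}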

45 vendor/k8s.io/client-go/informers/auditregistration/v1alpha1/interface.go generated vendored Normal file
@@ -0,0 +1,45 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1alpha1

import (
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// AuditSinks returns a AuditSinkInformer.
	AuditSinks() AuditSinkInformer
}

type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// AuditSinks returns a AuditSinkInformer.
func (v *version) AuditSinks() AuditSinkInformer {
	return &auditSinkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

62 vendor/k8s.io/client-go/informers/autoscaling/interface.go generated vendored Normal file
@@ -0,0 +1,62 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package autoscaling

import (
	v1 "k8s.io/client-go/informers/autoscaling/v1"
	v2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1"
	v2beta2 "k8s.io/client-go/informers/autoscaling/v2beta2"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to each of this group's versions.
type Interface interface {
	// V1 provides access to shared informers for resources in V1.
	V1() v1.Interface
	// V2beta1 provides access to shared informers for resources in V2beta1.
	V2beta1() v2beta1.Interface
	// V2beta2 provides access to shared informers for resources in V2beta2.
	V2beta2() v2beta2.Interface
}

type group struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
	return v1.New(g.factory, g.namespace, g.tweakListOptions)
}

// V2beta1 returns a new v2beta1.Interface.
func (g *group) V2beta1() v2beta1.Interface {
	return v2beta1.New(g.factory, g.namespace, g.tweakListOptions)
}

// V2beta2 returns a new v2beta2.Interface.
func (g *group) V2beta2() v2beta2.Interface {
	return v2beta2.New(g.factory, g.namespace, g.tweakListOptions)
}
89
vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
generated
vendored
Normal file
89
vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1

import (
	time "time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v1 "k8s.io/client-go/listers/autoscaling/v1"
	cache "k8s.io/client-go/tools/cache"
)

// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for
// HorizontalPodAutoscalers.
type HorizontalPodAutoscalerInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1.HorizontalPodAutoscalerLister
}

type horizontalPodAutoscalerInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options)
			},
		},
		&autoscalingv1.HorizontalPodAutoscaler{},
		resyncPeriod,
		indexers,
	)
}

func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&autoscalingv1.HorizontalPodAutoscaler{}, f.defaultInformer)
}

func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister {
	return v1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer())
}
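The generated constructors above are rarely called directly; as the comment on NewHorizontalPodAutoscalerInformer notes, the shared factory is preferred. The following is a minimal usage sketch, not part of the vendored code, assuming the pre-0.18 client-go API vendored here (no context arguments); the kubeconfig path and the 30-second resync period are illustrative only.

package main

import (
	"fmt"
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig (path is illustrative).
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/admin.conf")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Go through the shared factory so the informer and its cache are
	// reused by every consumer in the process.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	hpaInformer := factory.Autoscaling().V1().HorizontalPodAutoscalers()

	// React to HPA objects as they appear in the watch stream.
	hpaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			hpa := obj.(*autoscalingv1.HorizontalPodAutoscaler)
			fmt.Printf("observed HPA %s/%s\n", hpa.Namespace, hpa.Name)
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	<-stop
}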
45
vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v1

import (
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)

// Interface provides access to all the informers in this group version.
type Interface interface {
	// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
	HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer
}

type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer.
func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer {
	return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
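The New constructor above is what the factory's group interface calls to hand out this version's informers. A short sketch, assuming a SharedInformerFactory built as in the previous example: the Interface is reached through factory.Autoscaling().V1(), the informer is wired lazily, and nothing contacts the API server until the factory is started. The helper name is hypothetical.

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
)

// hpaListerFor (hypothetical helper) walks factory -> group -> version (the
// Interface above) -> informer and returns the lister. Calling Lister()
// registers the informer with the factory, so the subsequent Start runs it.
func hpaListerFor(client kubernetes.Interface, stop <-chan struct{}) autoscalinglisters.HorizontalPodAutoscalerLister {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Autoscaling().V1().HorizontalPodAutoscalers().Lister()
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	return lister
}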
89
vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by informer-gen. DO NOT EDIT.

package v2beta1

import (
	time "time"

	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	watch "k8s.io/apimachinery/pkg/watch"
	internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
	kubernetes "k8s.io/client-go/kubernetes"
	v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1"
	cache "k8s.io/client-go/tools/cache"
)

// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for
// HorizontalPodAutoscalers.
type HorizontalPodAutoscalerInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v2beta1.HorizontalPodAutoscalerLister
}

type horizontalPodAutoscalerInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(options)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(options)
			},
		},
		&autoscalingv2beta1.HorizontalPodAutoscaler{},
		resyncPeriod,
		indexers,
	)
}

func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&autoscalingv2beta1.HorizontalPodAutoscaler{}, f.defaultInformer)
}

func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister {
	return v2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer())
}
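The v2beta1 informer mirrors the v1 one above, so a second event-handler example would be redundant. Instead, a short sketch of the read path, assuming a factory that already has this informer registered, started, and synced: the Lister serves entirely from the informer's local index, so listing here never contacts the API server. The printHPAs helper is hypothetical.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
)

// printHPAs lists HorizontalPodAutoscalers for one namespace from the shared
// cache. The factory is assumed to have been started and synced already.
func printHPAs(factory informers.SharedInformerFactory, namespace string) error {
	lister := factory.Autoscaling().V2beta1().HorizontalPodAutoscalers().Lister()
	hpas, err := lister.HorizontalPodAutoscalers(namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	for _, hpa := range hpas {
		fmt.Printf("%s/%s: %d desired replicas\n", hpa.Namespace, hpa.Name, hpa.Status.DesiredReplicas)
	}
	return nil
}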
Some files were not shown because too many files have changed in this diff.