mirror of https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 13:29:11 +01:00

Compare commits (18 commits)

| SHA1 |
|---|
| a6af54ab30 |
| e41ef8cca3 |
| d26cd4b317 |
| f7d0acb731 |
| f1f8b2eaa7 |
| 0a7f14d75e |
| de76f9b14c |
| 8b84bb26ff |
| bb25192163 |
| 40bb490f4c |
| 08729f6ef9 |
| 3dd7de8132 |
| 471aeb5ea4 |
| 65e7093ee7 |
| fc0cd4ba30 |
| ba3eac6c57 |
| 11a95ce8fb |
| 29a9fc6b56 |
@@ -4,4 +4,4 @@ go:
script:
- hack/verify-gofmt.sh
- make build
- make test
- make test-unit
12 Dockerfile
@@ -11,10 +11,16 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.9.2
FROM fedora
WORKDIR /go/src/github.com/kubernetes-incubator/descheduler
COPY . .
RUN make

FROM scratch

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>

COPY _output/bin/descheduler /bin/descheduler
CMD ["/bin/descheduler --help"]
COPY --from=0 /go/src/github.com/kubernetes-incubator/descheduler/_output/bin/descheduler /bin/descheduler

CMD ["/bin/descheduler", "--help"]
20 Dockerfile.dev Normal file
@@ -0,0 +1,20 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM scratch

MAINTAINER Avesh Agarwal <avagarwa@redhat.com>

COPY _output/bin/descheduler /bin/descheduler

CMD ["/bin/descheduler", "--help"]
18 Makefile
@@ -30,13 +30,25 @@ IMAGE:=descheduler:$(VERSION)
all: build

build:
	go build ${LDFLAGS} -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler
	CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler github.com/kubernetes-incubator/descheduler/cmd/descheduler

image: build
dev-image: build
	docker build -f Dockerfile.dev -t $(IMAGE) .

image:
	docker build -t $(IMAGE) .

clean:
	rm -rf _output

test:
test-unit:
	./test/run-unit-tests.sh

test-e2e:
	./test/run-e2e-tests.sh

gen:
	./hack/update-codecgen.sh
	./hack/update-generated-conversions.sh
	./hack/update-generated-deep-copies.sh
	./hack/update-generated-defaulters.sh
42 README.md
@@ -56,10 +56,20 @@ in `kube-system` namespace.

First we create a simple Docker image utilizing the Dockerfile found in the root directory:

```
$ make dev-image
```

This creates an image based on the binary we've built before. To build both the
binary and image in one step, you can run the following command:

```
$ make image
```

This eliminates the need to have Go installed locally and builds the binary
within its own container.

### Create a cluster role

To give the descheduler the permissions it needs to work in a pod, create a cluster role:
@@ -150,9 +160,9 @@ $ kubectl create -f descheduler-job.yaml
```

## Policy and Strategies

Descheduler's policy is configurable and includes strategies to be enabled or disabled.
Three strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity` are currently implemented.
Four strategies, `RemoveDuplicates`, `LowNodeUtilization`, `RemovePodsViolatingInterPodAntiAffinity`, `RemovePodsViolatingNodeAffinity` are currently implemented.
As part of the policy, the parameters associated with the strategies can be configured too.
By default, all strategies are enabled.
@@ -228,16 +238,31 @@ strategies:
    enabled: false
```

### RemovePodsViolatingNodeAffinity

This strategy makes sure that pods violating node affinity are removed from nodes. For example, suppose podA was scheduled on nodeA, which satisfied the node affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time of scheduling. If nodeA later stops satisfying the rule, and another node nodeB that does satisfy it is available, podA will be evicted from nodeA. The policy file should look like this:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```
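The strategy above boils down to one condition per pod: it no longer fits the node it is running on, but it would fit some other schedulable node. Below is a minimal Go sketch of that condition using the `PodFitsCurrentNode` and `PodFitsAnyNode` helpers added elsewhere in this changeset; the surrounding loop, the `evict` callback, and the import path are illustrative assumptions, not the strategy's actual code.

```go
package main

import (
	v1 "k8s.io/api/core/v1"

	// Import path assumed for the node helpers added later in this change.
	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
)

// evictPodsViolatingNodeAffinity evicts pods that violate required node
// affinity on their current node but would fit another schedulable node.
// The evict callback stands in for the real eviction-subresource call.
func evictPodsViolatingNodeAffinity(pods []*v1.Pod, node *v1.Node, nodes []*v1.Node, evict func(*v1.Pod) bool) int {
	evicted := 0
	for _, pod := range pods {
		if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
			if evict(pod) {
				evicted++
			}
		}
	}
	return evicted
}
```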

## Pod Evictions

When the descheduler decides to evict pods from a node, it employs the following general mechanism (a rough sketch of this filtering follows the list):

* Critical pods (with the annotation scheduler.alpha.kubernetes.io/critical-pod) are never evicted.
* Pods (static, mirrored, or standalone pods) not part of an RC, RS, Deployment or Job are
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted.
* Best effort pods are evicted before Burstable and Guaranteed pods.

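The sketch below mirrors the rules in the list above as a single Go predicate. The helper structure and annotation checks are illustrative assumptions, not the descheduler's actual API, and the QoS-based eviction ordering is not shown.

```go
package main

import v1 "k8s.io/api/core/v1"

// isEvictable reports whether a pod may be considered for eviction:
// critical, mirror/static, DaemonSet, controller-less, and local-storage
// pods are excluded, matching the rules listed above.
func isEvictable(pod *v1.Pod) bool {
	// Critical pods are never evicted.
	if _, critical := pod.Annotations["scheduler.alpha.kubernetes.io/critical-pod"]; critical {
		return false
	}
	// Mirror (static) pods are never evicted.
	if _, mirror := pod.Annotations["kubernetes.io/config.mirror"]; mirror {
		return false
	}
	// Pods without a controller (RC, RS, Deployment, Job) won't be recreated.
	if len(pod.OwnerReferences) == 0 {
		return false
	}
	// DaemonSet pods would immediately be rescheduled onto the same node.
	for _, ref := range pod.OwnerReferences {
		if ref.Kind == "DaemonSet" {
			return false
		}
	}
	// Pods using local storage are never evicted.
	for _, vol := range pod.Spec.Volumes {
		if vol.EmptyDir != nil || vol.HostPath != nil {
			return false
		}
	}
	return true
}
```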
### Pod Disruption Budget (PDB)
Pods subject to a Pod Disruption Budget (PDB) are not evicted if descheduling violates its pod
@@ -248,7 +273,7 @@ disruption budget (PDB). The pods are evicted by using eviction subresource to h

This roadmap is not in any particular order.

* Strategy to consider taints and tolerations
* Consideration of pod affinity
* Strategy to consider pod lifetime
* Strategy to consider number of pending pods
* Integration with cluster autoscaler
@@ -256,6 +281,13 @@ This roadmap is not in any particular order.
* Consideration of the Kubernetes scheduler's predicates


## Compatibility matrix

Descheduler | supported Kubernetes version
------------|-----------------------------
0.4         | 1.9+
0.1-0.3     | 1.7-1.8

## Note

This project is under active development, and is not intended for production use.

@@ -55,4 +55,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
	// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
	// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
}
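For orientation, the fields these flags bind to mirror the `DeschedulerConfiguration` fields shown further down in this change. The struct below is an assumed layout of the server options, not a copy of the actual `cmd/descheduler/app/options` code.

```go
package options

import "time"

// DeschedulerServer is an assumed sketch of the options the flags above populate.
type DeschedulerServer struct {
	DeschedulingInterval      time.Duration // how often a descheduling pass runs
	KubeconfigFile            string        // kubeconfig used to talk to the API server
	PolicyConfigFile          string        // path to the DeschedulerPolicy file
	DryRun                    bool          // log evictions instead of performing them
	NodeSelector              string        // label query limiting the nodes considered
	MaxNoOfPodsToEvictPerNode int           // per-node eviction cap set by --max-pods-to-evict-per-node
}
```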
8 examples/node-affinity.yml Normal file
@@ -0,0 +1,8 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
95 hack/e2e-gce/gcloud_create_cluster.sh Executable file
@@ -0,0 +1,95 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
echo "Make sure that uuid package is installed"
|
||||
|
||||
master_uuid=$(uuid)
|
||||
node1_uuid=$(uuid)
|
||||
node2_uuid=$(uuid)
|
||||
kube_apiserver_port=6443
|
||||
kube_version=1.9.4
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
|
||||
E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
|
||||
|
||||
|
||||
create_cluster() {
|
||||
echo "#################### Creating instances ##########################"
|
||||
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
# Keeping the --zone here so as to make sure that e2e's can run locally.
|
||||
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
# Delete the firewall port created for master.
|
||||
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
chmod 755 $E2E_GCE_HOME/delete_cluster.sh
|
||||
}
|
||||
|
||||
|
||||
generate_kubeadm_instance_files() {
|
||||
# TODO: Check if they have come up. awk $6 contains the state(RUNNING or not).
|
||||
master_public_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $5}')
|
||||
node1_public_ip=$(gcloud compute instances list | grep $node1_uuid|awk '{print $5}')
|
||||
node2_public_ip=$(gcloud compute instances list | grep $node2_uuid|awk '{print $5}')
|
||||
echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_public_ip}" --skip-preflight-checks --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
|
||||
}
|
||||
|
||||
|
||||
transfer_install_files() {
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||
}
|
||||
|
||||
|
||||
install_kube() {
|
||||
# Docker installation.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
# kubeadm installation.
|
||||
# 1. Transfer files to master, nodes.
|
||||
transfer_install_files
|
||||
# 2. Install kubeadm.
|
||||
#TODO: Add rm /tmp/kubeadm_install.sh
|
||||
# Open port for kube API server
|
||||
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
|
||||
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
|
||||
|
||||
# Copy the kubeconfig file onto /tmp for e2e tests.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
|
||||
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
|
||||
|
||||
# Postinstall on master, need to add a network plugin for kube-dns to come to running state.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
|
||||
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
|
||||
|
||||
# Copy kubeadm_join to every node.
|
||||
#TODO: Put these in a loop, so that extension becomes possible.
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
}
|
||||
|
||||
|
||||
create_cluster
|
||||
|
||||
generate_kubeadm_instance_files
|
||||
|
||||
install_kube
|
||||
8 hack/e2e-gce/gcloud_sdk_configure.sh Executable file
@@ -0,0 +1,8 @@
#!/usr/bin/env bash

set -e


gcloud auth activate-service-account --key-file "${GCE_SA_CREDS}"
gcloud config set project $GCE_PROJECT_ID
gcloud config set compute/zone $GCE_ZONE
9 hack/e2e-gce/install_gcloud.sh Executable file
@@ -0,0 +1,9 @@
#!/usr/bin/env bash

set -e

wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-176.0.0-linux-x86_64.tar.gz

tar -xvzf google-cloud-sdk-176.0.0-linux-x86_64.tar.gz

./google-cloud-sdk/install.sh -q
11 hack/e2e-gce/kubeadm_preinstall.sh Normal file
@@ -0,0 +1,11 @@
apt-get update
apt-get install -y docker.io

apt-get update && apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
exit 0
@@ -43,5 +43,5 @@ OS_ROOT="$( os::util::absolute_path "${init_source}" )"
export OS_ROOT
cd "${OS_ROOT}"

PRJ_PREFIX="github.com/kubernetes-incubator/descheduler"
PRJ_PREFIX="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
OS_OUTPUT_BINPATH="${OS_ROOT}/_output/bin"
@@ -40,7 +40,7 @@ generated_files=($(

# We only work for deps within this prefix.
#my_prefix="k8s.io/kubernetes"
my_prefix="github.com/kubernetes-incubator/descheduler"
my_prefix="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"

# Register function to be called on EXIT to remove codecgen
# binary and also to touch the files that should be regenerated
@@ -673,9 +673,9 @@ func (x *StrategyParameters) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
_, _ = yysep2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayStart(1)
|
||||
r.WriteArrayStart(2)
|
||||
} else {
|
||||
r.WriteMapStart(1)
|
||||
r.WriteMapStart(2)
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
@@ -688,6 +688,33 @@ func (x *StrategyParameters) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
yy6 := &x.NodeResourceUtilizationThresholds
|
||||
yy6.CodecEncodeSelf(e)
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if x.NodeAffinityType == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym9 := z.EncBinary()
|
||||
_ = yym9
|
||||
if false {
|
||||
} else {
|
||||
z.F.EncSliceStringV(x.NodeAffinityType, e)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `NodeAffinityType`)
|
||||
r.WriteMapElemValue()
|
||||
if x.NodeAffinityType == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym10 := z.EncBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else {
|
||||
z.F.EncSliceStringV(x.NodeAffinityType, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayEnd()
|
||||
} else {
|
||||
@@ -754,6 +781,18 @@ func (x *StrategyParameters) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
|
||||
yyv4 := &x.NodeResourceUtilizationThresholds
|
||||
yyv4.CodecDecodeSelf(d)
|
||||
}
|
||||
case "NodeAffinityType":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeAffinityType = nil
|
||||
} else {
|
||||
yyv5 := &x.NodeAffinityType
|
||||
yym6 := z.DecBinary()
|
||||
_ = yym6
|
||||
if false {
|
||||
} else {
|
||||
z.F.DecSliceStringX(yyv5, d)
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
@@ -765,16 +804,16 @@ func (x *StrategyParameters) codecDecodeSelfFromArray(l int, d *codec1978.Decode
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj5 int
|
||||
var yyb5 bool
|
||||
var yyhl5 bool = l >= 0
|
||||
yyj5++
|
||||
if yyhl5 {
|
||||
yyb5 = yyj5 > l
|
||||
var yyj7 int
|
||||
var yyb7 bool
|
||||
var yyhl7 bool = l >= 0
|
||||
yyj7++
|
||||
if yyhl7 {
|
||||
yyb7 = yyj7 > l
|
||||
} else {
|
||||
yyb5 = r.CheckBreak()
|
||||
yyb7 = r.CheckBreak()
|
||||
}
|
||||
if yyb5 {
|
||||
if yyb7 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -782,21 +821,43 @@ func (x *StrategyParameters) codecDecodeSelfFromArray(l int, d *codec1978.Decode
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeResourceUtilizationThresholds = NodeResourceUtilizationThresholds{}
|
||||
} else {
|
||||
yyv6 := &x.NodeResourceUtilizationThresholds
|
||||
yyv6.CodecDecodeSelf(d)
|
||||
yyv8 := &x.NodeResourceUtilizationThresholds
|
||||
yyv8.CodecDecodeSelf(d)
|
||||
}
|
||||
yyj7++
|
||||
if yyhl7 {
|
||||
yyb7 = yyj7 > l
|
||||
} else {
|
||||
yyb7 = r.CheckBreak()
|
||||
}
|
||||
if yyb7 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeAffinityType = nil
|
||||
} else {
|
||||
yyv9 := &x.NodeAffinityType
|
||||
yym10 := z.DecBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else {
|
||||
z.F.DecSliceStringX(yyv9, d)
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj5++
|
||||
if yyhl5 {
|
||||
yyb5 = yyj5 > l
|
||||
yyj7++
|
||||
if yyhl7 {
|
||||
yyb7 = yyj7 > l
|
||||
} else {
|
||||
yyb5 = r.CheckBreak()
|
||||
yyb7 = r.CheckBreak()
|
||||
}
|
||||
if yyb5 {
|
||||
if yyb7 {
|
||||
break
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
z.DecStructFieldNotFound(yyj5-1, "")
|
||||
z.DecStructFieldNotFound(yyj7-1, "")
|
||||
}
|
||||
r.ReadArrayEnd()
|
||||
}
|
||||
@@ -1131,7 +1192,7 @@ func (x codecSelfer1234) decStrategyList(v *StrategyList, d *codec1978.Decoder)
|
||||
yyl1 := r.ReadMapStart()
|
||||
yybh1 := z.DecBasicHandle()
|
||||
if yyv1 == nil {
|
||||
yyrl1 := z.DecInferLen(yyl1, yybh1.MaxInitLen, 56)
|
||||
yyrl1 := z.DecInferLen(yyl1, yybh1.MaxInitLen, 80)
|
||||
yyv1 = make(map[StrategyName]DeschedulerStrategy, yyrl1)
|
||||
*v = yyv1
|
||||
}
|
||||
|
||||
@@ -47,6 +47,7 @@ type DeschedulerStrategy struct {
// Only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds
	NodeAffinityType                  []string
}

type Percentage float64
@@ -707,13 +707,14 @@ func (x *StrategyParameters) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [1]bool
|
||||
var yyq2 [2]bool
|
||||
_ = yyq2
|
||||
_, _ = yysep2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
yyq2[0] = true
|
||||
yyq2[1] = len(x.NodeAffinityType) != 0
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayStart(1)
|
||||
r.WriteArrayStart(2)
|
||||
} else {
|
||||
var yynn2 = 0
|
||||
for _, b := range yyq2 {
|
||||
@@ -741,6 +742,39 @@ func (x *StrategyParameters) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
yy6.CodecEncodeSelf(e)
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[1] {
|
||||
if x.NodeAffinityType == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym9 := z.EncBinary()
|
||||
_ = yym9
|
||||
if false {
|
||||
} else {
|
||||
z.F.EncSliceStringV(x.NodeAffinityType, e)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
r.EncodeNil()
|
||||
}
|
||||
} else {
|
||||
if yyq2[1] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `nodeAffinityType`)
|
||||
r.WriteMapElemValue()
|
||||
if x.NodeAffinityType == nil {
|
||||
r.EncodeNil()
|
||||
} else {
|
||||
yym10 := z.EncBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else {
|
||||
z.F.EncSliceStringV(x.NodeAffinityType, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayEnd()
|
||||
} else {
|
||||
@@ -807,6 +841,18 @@ func (x *StrategyParameters) codecDecodeSelfFromMap(l int, d *codec1978.Decoder)
|
||||
yyv4 := &x.NodeResourceUtilizationThresholds
|
||||
yyv4.CodecDecodeSelf(d)
|
||||
}
|
||||
case "nodeAffinityType":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeAffinityType = nil
|
||||
} else {
|
||||
yyv5 := &x.NodeAffinityType
|
||||
yym6 := z.DecBinary()
|
||||
_ = yym6
|
||||
if false {
|
||||
} else {
|
||||
z.F.DecSliceStringX(yyv5, d)
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
@@ -818,16 +864,16 @@ func (x *StrategyParameters) codecDecodeSelfFromArray(l int, d *codec1978.Decode
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj5 int
|
||||
var yyb5 bool
|
||||
var yyhl5 bool = l >= 0
|
||||
yyj5++
|
||||
if yyhl5 {
|
||||
yyb5 = yyj5 > l
|
||||
var yyj7 int
|
||||
var yyb7 bool
|
||||
var yyhl7 bool = l >= 0
|
||||
yyj7++
|
||||
if yyhl7 {
|
||||
yyb7 = yyj7 > l
|
||||
} else {
|
||||
yyb5 = r.CheckBreak()
|
||||
yyb7 = r.CheckBreak()
|
||||
}
|
||||
if yyb5 {
|
||||
if yyb7 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -835,21 +881,43 @@ func (x *StrategyParameters) codecDecodeSelfFromArray(l int, d *codec1978.Decode
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeResourceUtilizationThresholds = NodeResourceUtilizationThresholds{}
|
||||
} else {
|
||||
yyv6 := &x.NodeResourceUtilizationThresholds
|
||||
yyv6.CodecDecodeSelf(d)
|
||||
yyv8 := &x.NodeResourceUtilizationThresholds
|
||||
yyv8.CodecDecodeSelf(d)
|
||||
}
|
||||
yyj7++
|
||||
if yyhl7 {
|
||||
yyb7 = yyj7 > l
|
||||
} else {
|
||||
yyb7 = r.CheckBreak()
|
||||
}
|
||||
if yyb7 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeAffinityType = nil
|
||||
} else {
|
||||
yyv9 := &x.NodeAffinityType
|
||||
yym10 := z.DecBinary()
|
||||
_ = yym10
|
||||
if false {
|
||||
} else {
|
||||
z.F.DecSliceStringX(yyv9, d)
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj5++
|
||||
if yyhl5 {
|
||||
yyb5 = yyj5 > l
|
||||
yyj7++
|
||||
if yyhl7 {
|
||||
yyb7 = yyj7 > l
|
||||
} else {
|
||||
yyb5 = r.CheckBreak()
|
||||
yyb7 = r.CheckBreak()
|
||||
}
|
||||
if yyb5 {
|
||||
if yyb7 {
|
||||
break
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
z.DecStructFieldNotFound(yyj5-1, "")
|
||||
z.DecStructFieldNotFound(yyj7-1, "")
|
||||
}
|
||||
r.ReadArrayEnd()
|
||||
}
|
||||
@@ -1214,7 +1282,7 @@ func (x codecSelfer1234) decStrategyList(v *StrategyList, d *codec1978.Decoder)
|
||||
yyl1 := r.ReadMapStart()
|
||||
yybh1 := z.DecBasicHandle()
|
||||
if yyv1 == nil {
|
||||
yyrl1 := z.DecInferLen(yyl1, yybh1.MaxInitLen, 56)
|
||||
yyrl1 := z.DecInferLen(yyl1, yybh1.MaxInitLen, 80)
|
||||
yyv1 = make(map[StrategyName]DeschedulerStrategy, yyrl1)
|
||||
*v = yyv1
|
||||
}
|
||||
|
||||
@@ -47,6 +47,7 @@ type DeschedulerStrategy struct {
// Only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType                  []string                          `json:"nodeAffinityType,omitempty"`
}

type Percentage float64
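The new `NodeAffinityType` field is what the `nodeAffinityType` block in the policy examples above decodes into. A minimal sketch of the equivalent value constructed directly in Go (the import path is assumed):

```go
package main

import (
	"fmt"

	"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
)

func main() {
	// Equivalent of the policy snippet:
	//   params:
	//     nodeAffinityType:
	//     - "requiredDuringSchedulingIgnoredDuringExecution"
	params := v1alpha1.StrategyParameters{
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
	}
	fmt.Println(params.NodeAffinityType)
}
```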
@@ -123,6 +123,7 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
|
||||
if err := Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -135,6 +136,7 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
|
||||
if err := Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(&in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -110,6 +110,11 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -110,6 +110,11 @@ func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilization
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
in.NodeResourceUtilizationThresholds.DeepCopyInto(&out.NodeResourceUtilizationThresholds)
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -77,16 +77,16 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [7]bool
|
||||
var yyq2 [8]bool
|
||||
_ = yyq2
|
||||
_, _ = yysep2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
yyq2[0] = x.Kind != ""
|
||||
yyq2[1] = x.APIVersion != ""
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayStart(7)
|
||||
r.WriteArrayStart(8)
|
||||
} else {
|
||||
var yynn2 = 5
|
||||
var yynn2 = 6
|
||||
for _, b := range yyq2 {
|
||||
if b {
|
||||
yynn2++
|
||||
@@ -244,6 +244,25 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
r.EncodeString(codecSelferCcUTF81234, string(x.NodeSelector))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
yym25 := z.EncBinary()
|
||||
_ = yym25
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
} else {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `MaxNoOfPodsToEvictPerNode`)
|
||||
r.WriteMapElemValue()
|
||||
yym26 := z.EncBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayEnd()
|
||||
} else {
|
||||
@@ -389,6 +408,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.De
|
||||
*((*string)(yyv16)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "MaxNoOfPodsToEvictPerNode":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv18 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym19 := z.DecBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
@@ -400,16 +431,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj18 int
|
||||
var yyb18 bool
|
||||
var yyhl18 bool = l >= 0
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
var yyj20 int
|
||||
var yyb20 bool
|
||||
var yyhl20 bool = l >= 0
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -417,29 +448,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv19 := &x.Kind
|
||||
yym20 := z.DecBinary()
|
||||
_ = yym20
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv19)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv21 := &x.APIVersion
|
||||
yyv21 := &x.Kind
|
||||
yym22 := z.DecBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
@@ -447,13 +456,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
*((*string)(yyv21)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv23 := &x.APIVersion
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv23)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -461,23 +492,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv23 := &x.DeschedulingInterval
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
yyv25 := &x.DeschedulingInterval
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else if yyxt24 := z.Extension(z.I2Rtid(yyv23)); yyxt24 != nil {
|
||||
z.DecExtension(yyv23, yyxt24)
|
||||
} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
|
||||
z.DecExtension(yyv25, yyxt26)
|
||||
} else {
|
||||
*((*int64)(yyv23)) = int64(r.DecodeInt(64))
|
||||
*((*int64)(yyv25)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -485,29 +516,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv25 := &x.KubeconfigFile
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv25)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv27 := &x.PolicyConfigFile
|
||||
yyv27 := &x.KubeconfigFile
|
||||
yym28 := z.DecBinary()
|
||||
_ = yym28
|
||||
if false {
|
||||
@@ -515,13 +524,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
*((*string)(yyv27)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv29 := &x.PolicyConfigFile
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv29)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -529,21 +560,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv29 := &x.DryRun
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
yyv31 := &x.DryRun
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv29)) = r.DecodeBool()
|
||||
*((*bool)(yyv31)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -551,26 +582,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv31 := &x.NodeSelector
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
yyv33 := &x.NodeSelector
|
||||
yym34 := z.DecBinary()
|
||||
_ = yym34
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv31)) = r.DecodeString()
|
||||
*((*string)(yyv33)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv35 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym36 := z.DecBinary()
|
||||
_ = yym36
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
break
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
z.DecStructFieldNotFound(yyj18-1, "")
|
||||
z.DecStructFieldNotFound(yyj20-1, "")
|
||||
}
|
||||
r.ReadArrayEnd()
|
||||
}
|
||||
|
||||
@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {

	// Node selectors
	NodeSelector string

	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
	MaxNoOfPodsToEvictPerNode int
}
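The new `MaxNoOfPodsToEvictPerNode` field is enforced per node inside each strategy loop; the guard mirrors the `nodepodCount[node]+1 > maxPodsToEvict` check added to the strategies further down. The helper below is illustrative, not the actual strategy code.

```go
package main

import v1 "k8s.io/api/core/v1"

// evictUpToLimit evicts candidate pods on a node until the per-node cap is hit.
// The evictedOnNode counter is shared across strategies within one run.
func evictUpToLimit(candidates []*v1.Pod, node *v1.Node, evictedOnNode map[*v1.Node]int, limit int, evict func(*v1.Pod) bool) {
	for _, pod := range candidates {
		if evictedOnNode[node]+1 > limit {
			break // eviction budget for this node is exhausted
		}
		if evict(pod) {
			evictedOnNode[node]++
		}
	}
}
```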
@@ -77,7 +77,7 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
} else {
|
||||
yysep2 := !z.EncBinary()
|
||||
yy2arr2 := z.EncBasicHandle().StructToArray
|
||||
var yyq2 [7]bool
|
||||
var yyq2 [8]bool
|
||||
_ = yyq2
|
||||
_, _ = yysep2, yy2arr2
|
||||
const yyr2 bool = false
|
||||
@@ -87,8 +87,9 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
yyq2[4] = x.PolicyConfigFile != ""
|
||||
yyq2[5] = x.DryRun != false
|
||||
yyq2[6] = x.NodeSelector != ""
|
||||
yyq2[7] = x.MaxNoOfPodsToEvictPerNode != 0
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayStart(7)
|
||||
r.WriteArrayStart(8)
|
||||
} else {
|
||||
var yynn2 = 1
|
||||
for _, b := range yyq2 {
|
||||
@@ -272,6 +273,31 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayElem()
|
||||
if yyq2[7] {
|
||||
yym25 := z.EncBinary()
|
||||
_ = yym25
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
} else {
|
||||
r.EncodeInt(0)
|
||||
}
|
||||
} else {
|
||||
if yyq2[7] {
|
||||
r.WriteMapElemKey()
|
||||
r.EncStructFieldKey(codecSelferValueTypeString1234, `maxNoOfPodsToEvictPerNode`)
|
||||
r.WriteMapElemValue()
|
||||
yym26 := z.EncBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
|
||||
}
|
||||
}
|
||||
}
|
||||
if yyr2 || yy2arr2 {
|
||||
r.WriteArrayEnd()
|
||||
} else {
|
||||
@@ -417,6 +443,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.De
|
||||
*((*string)(yyv16)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
case "maxNoOfPodsToEvictPerNode":
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv18 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym19 := z.DecBinary()
|
||||
_ = yym19
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
default:
|
||||
z.DecStructFieldNotFound(-1, yys3)
|
||||
} // end switch yys3
|
||||
@@ -428,16 +466,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
var h codecSelfer1234
|
||||
z, r := codec1978.GenHelperDecoder(d)
|
||||
_, _, _ = h, z, r
|
||||
var yyj18 int
|
||||
var yyb18 bool
|
||||
var yyhl18 bool = l >= 0
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
var yyj20 int
|
||||
var yyb20 bool
|
||||
var yyhl20 bool = l >= 0
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -445,29 +483,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.Kind = ""
|
||||
} else {
|
||||
yyv19 := &x.Kind
|
||||
yym20 := z.DecBinary()
|
||||
_ = yym20
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv19)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv21 := &x.APIVersion
|
||||
yyv21 := &x.Kind
|
||||
yym22 := z.DecBinary()
|
||||
_ = yym22
|
||||
if false {
|
||||
@@ -475,13 +491,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
*((*string)(yyv21)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.APIVersion = ""
|
||||
} else {
|
||||
yyv23 := &x.APIVersion
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv23)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -489,23 +527,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DeschedulingInterval = 0
|
||||
} else {
|
||||
yyv23 := &x.DeschedulingInterval
|
||||
yym24 := z.DecBinary()
|
||||
_ = yym24
|
||||
yyv25 := &x.DeschedulingInterval
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else if yyxt24 := z.Extension(z.I2Rtid(yyv23)); yyxt24 != nil {
|
||||
z.DecExtension(yyv23, yyxt24)
|
||||
} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
|
||||
z.DecExtension(yyv25, yyxt26)
|
||||
} else {
|
||||
*((*int64)(yyv23)) = int64(r.DecodeInt(64))
|
||||
*((*int64)(yyv25)) = int64(r.DecodeInt(64))
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -513,29 +551,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.KubeconfigFile = ""
|
||||
} else {
|
||||
yyv25 := &x.KubeconfigFile
|
||||
yym26 := z.DecBinary()
|
||||
_ = yym26
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv25)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv27 := &x.PolicyConfigFile
|
||||
yyv27 := &x.KubeconfigFile
|
||||
yym28 := z.DecBinary()
|
||||
_ = yym28
|
||||
if false {
|
||||
@@ -543,13 +559,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
*((*string)(yyv27)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.PolicyConfigFile = ""
|
||||
} else {
|
||||
yyv29 := &x.PolicyConfigFile
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv29)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -557,21 +595,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.DryRun = false
|
||||
} else {
|
||||
yyv29 := &x.DryRun
|
||||
yym30 := z.DecBinary()
|
||||
_ = yym30
|
||||
yyv31 := &x.DryRun
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
if false {
|
||||
} else {
|
||||
*((*bool)(yyv29)) = r.DecodeBool()
|
||||
*((*bool)(yyv31)) = r.DecodeBool()
|
||||
}
|
||||
}
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
@@ -579,26 +617,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
|
||||
if r.TryDecodeAsNil() {
|
||||
x.NodeSelector = ""
|
||||
} else {
|
||||
yyv31 := &x.NodeSelector
|
||||
yym32 := z.DecBinary()
|
||||
_ = yym32
|
||||
yyv33 := &x.NodeSelector
|
||||
yym34 := z.DecBinary()
|
||||
_ = yym34
|
||||
if false {
|
||||
} else {
|
||||
*((*string)(yyv31)) = r.DecodeString()
|
||||
*((*string)(yyv33)) = r.DecodeString()
|
||||
}
|
||||
}
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb20 {
|
||||
r.ReadArrayEnd()
|
||||
return
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
if r.TryDecodeAsNil() {
|
||||
x.MaxNoOfPodsToEvictPerNode = 0
|
||||
} else {
|
||||
yyv35 := &x.MaxNoOfPodsToEvictPerNode
|
||||
yym36 := z.DecBinary()
|
||||
_ = yym36
|
||||
if false {
|
||||
} else {
|
||||
*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
|
||||
}
|
||||
}
|
||||
for {
|
||||
yyj18++
|
||||
if yyhl18 {
|
||||
yyb18 = yyj18 > l
|
||||
yyj20++
|
||||
if yyhl20 {
|
||||
yyb20 = yyj20 > l
|
||||
} else {
|
||||
yyb18 = r.CheckBreak()
|
||||
yyb20 = r.CheckBreak()
|
||||
}
|
||||
if yyb18 {
|
||||
if yyb20 {
|
||||
break
|
||||
}
|
||||
r.ReadArrayElem()
|
||||
z.DecStructFieldNotFound(yyj18-1, "")
|
||||
z.DecStructFieldNotFound(yyj20-1, "")
|
||||
}
|
||||
r.ReadArrayEnd()
|
||||
}
|
||||
|
||||
@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {

	// Node selectors
	NodeSelector string `json:"nodeSelector,omitempty"`

	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
	MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}
@@ -47,6 +47,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
	out.PolicyConfigFile = in.PolicyConfigFile
	out.DryRun = in.DryRun
	out.NodeSelector = in.NodeSelector
	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
	return nil
}

@@ -61,6 +62,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
	out.PolicyConfigFile = in.PolicyConfigFile
	out.DryRun = in.DryRun
	out.NodeSelector = in.NodeSelector
	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
	return nil
}

@@ -55,14 +55,16 @@ func Run(rs *options.DeschedulerServer) error {
		return err
	}

	if len(nodes) == 0 {
		glog.V(1).Infof("node list is empty")
	if len(nodes) <= 1 {
		glog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
		return nil
	}

	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes)
	nodePodCount := strategies.InitializeNodePodCount(nodes)
	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
	strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)

	return nil
}
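`InitializeNodePodCount` seeds the per-node eviction counter that all strategies now share within one run, so the `--max-pods-to-evict-per-node` cap applies across strategies rather than per strategy. A plausible sketch based on how the counter is used in this diff, not a copy of the source:

```go
package strategies

import v1 "k8s.io/api/core/v1"

// nodePodEvictedCount tracks how many pods have been evicted from each node
// during the current descheduling run.
type nodePodEvictedCount map[*v1.Node]int

// InitializeNodePodCount returns a zeroed counter for the given nodes.
func InitializeNodePodCount(nodes []*v1.Node) nodePodEvictedCount {
	counts := make(nodePodEvictedCount, len(nodes))
	for _, node := range nodes {
		counts[node] = 0
	}
	return counts
}
```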
@@ -20,6 +20,7 @@ import (
	"time"

	"github.com/golang/glog"
	"github.com/kubernetes-incubator/descheduler/pkg/utils"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
@@ -124,3 +125,43 @@ func IsNodeUschedulable(node *v1.Node) bool {
	}
	return false
}

// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
// multiple criteria, like, pod node selector matching the node label, node
// being schedulable or not.
func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
	for _, node := range nodes {

		ok, err := utils.PodMatchNodeSelector(pod, node)
		if err != nil || !ok {
			continue
		}
		if ok {
			if !IsNodeUschedulable(node) {
				glog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
				return true
			}
			return false
		}
	}
	return false
}

// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
// node selector matches the node label.
func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
	ok, err := utils.PodMatchNodeSelector(pod, node)

	if err != nil {
		glog.Error(err)
		return false
	}

	if !ok {
		glog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
		return false
	}

	glog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
	return true
}
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
"github.com/kubernetes-incubator/descheduler/test"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
@@ -102,3 +103,245 @@ func TestIsNodeUschedulable(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestPodFitsCurrentNode(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
node *v1.Node
|
||||
success bool
|
||||
}{
|
||||
{
|
||||
description: "Pod with nodeAffinity set, expected to fit the node",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod with nodeAffinity set, not expected to fit the node",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
actual := PodFitsCurrentNode(tc.pod, tc.node)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodFitsAnyNode(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
success bool
|
||||
}{
|
||||
{
|
||||
description: "Pod expected to fit one of the nodes",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit none of the nodes",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Nodes are unschedulable but labels match, should fail",
|
||||
pod: &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
actual := PodFitsAnyNode(tc.pod, tc.nodes)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,9 +17,10 @@ limitations under the License.
package strategies

import (
	"github.com/golang/glog"
	"strings"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

@@ -35,15 +36,15 @@ type DuplicatePodsMap map[string][]*v1.Pod
// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of another if both of them are from the same creator and kind and are within the same
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storage.
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
	if !strategy.Enabled {
		return
	}
	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
}

// deleteDuplicatePods evicts duplicate pods from the nodes and returns the count of evicted pods.
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
	podsEvicted := 0
	for _, node := range nodes {
		glog.V(1).Infof("Processing node: %#v", node.Name)
@@ -53,16 +54,20 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
				glog.V(1).Infof("%#v", creator)
				// i = 0 does not evict the first pod
				for i := 1; i < len(pods); i++ {
					if nodepodCount[node]+1 > maxPodsToEvict {
						break
					}
					success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
					if !success {
						glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
					} else {
						podsEvicted++
						nodepodCount[node]++
						glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
					}
				}
			}
		}
		podsEvicted += nodepodCount[node]
	}
	return podsEvicted
}

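deleteDuplicatePods works off a DuplicatePodsMap keyed by the pods' creator. The listing helper is not part of this hunk, so the snippet below is only an assumed illustration of the "same creator, kind and namespace" rule described in the comment above (duplicateKey is a hypothetical name, not a function from the repo):

// Hypothetical sketch: pods that share a namespace and owner (kind/name) land in the
// same DuplicatePodsMap bucket; all but the first in a bucket are eviction candidates.
func duplicateKey(pod *v1.Pod) string {
	parts := []string{pod.Namespace}
	for _, ref := range pod.ObjectMeta.OwnerReferences {
		parts = append(parts, ref.Kind, ref.Name)
	}
	return strings.Join(parts, "/")
}
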
@@ -37,11 +37,15 @@ func TestFindDuplicatePods(t *testing.T) {
	p5 := test.BuildTestPod("p5", 100, 0, node.Name)
	p6 := test.BuildTestPod("p6", 100, 0, node.Name)
	p7 := test.BuildTestPod("p7", 100, 0, node.Name)
	p8 := test.BuildTestPod("p8", 100, 0, node.Name)
	p9 := test.BuildTestPod("p9", 100, 0, node.Name)

	// All the following pods except for one will be evicted.
	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	p8.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()

	// The following 4 pods won't get evicted.
	// A daemonset.
@@ -66,12 +70,14 @@ func TestFindDuplicatePods(t *testing.T) {
	expectedEvictedPodCount := 2
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7}}, nil
		return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9}}, nil
	})
	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		return true, node, nil
	})
	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false)
	npe := nodePodEvictedCount{}
	npe[node] = 0
	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
	if podsEvicted != expectedEvictedPodCount {
		t.Errorf("Unexpected no of pods evicted")
	}

@@ -43,7 +43,7 @@ type NodeUsageMap struct {
}
type NodePodsMap map[*v1.Node][]*v1.Pod

func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
	if !strategy.Enabled {
		return
	}
@@ -90,7 +90,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
		targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
	glog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))

	totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
	totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
	glog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)

}
@@ -151,7 +151,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
	return lowNodes, targetNodes
}

func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
	podsEvicted := 0

	SortNodesByUsage(targetNodes)
@@ -189,17 +189,17 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
			nodeCapacity = node.node.Status.Allocatable
		}
		glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
		currentPodsEvicted := podsEvicted
		currentPodsEvicted := nodepodCount[node.node]

		// evict best effort pods
		evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
		evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
		// evict burstable pods
		evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
		evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
		// evict guaranteed pods
		evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)

		podsEvictedFromNode := podsEvicted - currentPodsEvicted
		glog.V(1).Infof("%v pods evicted from node %#v with usage %v", podsEvictedFromNode, node.node.Name, node.usage)
		evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
		nodepodCount[node.node] = currentPodsEvicted
		podsEvicted = podsEvicted + nodepodCount[node.node]
		glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
	}
	return podsEvicted
}
@@ -214,10 +214,13 @@ func evictPods(inputPods []*v1.Pod,
	totalCpu *float64,
	totalMem *float64,
	podsEvicted *int,
	dryRun bool) {
	dryRun bool, maxPodsToEvict int) {
	if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
		onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
		for _, pod := range inputPods {
			if *podsEvicted+1 > maxPodsToEvict {
				break
			}
			cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
			mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
			success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)

@@ -109,15 +109,19 @@ func TestLowNodeUtilization(t *testing.T) {
		}
		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
	})
	expectedPodsEvicted := 4
	expectedPodsEvicted := 3
	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
	if len(lowNodes) != 1 {
		t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
	}
	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
	npe := nodePodEvictedCount{}
	npe[n1] = 0
	npe[n2] = 0
	npe[n3] = 0
	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
	if expectedPodsEvicted != podsEvicted {
		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
	}

}

74 pkg/descheduler/strategies/node_affinity.go Normal file
@@ -0,0 +1,74 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
	"github.com/golang/glog"
	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/api"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
	"k8s.io/api/core/v1"
)

func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
	evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
	glog.V(1).Infof("Evicted %v pods", evictionCount)
}

func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
	evictedPodCount := 0
	if !strategy.Enabled {
		return evictedPodCount
	}

	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
		glog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)

		switch nodeAffinity {
		case "requiredDuringSchedulingIgnoredDuringExecution":
			for _, node := range nodes {
				glog.V(1).Infof("Processing node: %#v\n", node.Name)

				pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node)
				if err != nil {
					glog.Errorf("failed to get pods from %v: %v", node.Name, err)
				}

				for _, pod := range pods {
					if nodepodCount[node]+1 > maxPodsToEvict {
						break
					}
					if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {

						if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
							glog.V(1).Infof("Evicting pod: %v", pod.Name)
							evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, false)
							nodepodCount[node]++
						}
					}
				}
				evictedPodCount += nodepodCount[node]
			}
		default:
			glog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
			return evictedPodCount
		}
	}
	return evictedPodCount
}
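For orientation, the exported entry point is meant to be handed a strategy straight from the descheduler policy, together with the shared per-node eviction counter. A minimal wiring sketch using only identifiers introduced in this change set (ds, nodes and evictionPolicyGroupVersion are assumed to come from the surrounding descheduler run):

// Sketch of a caller inside the descheduler; not part of this diff.
nodeAffinityStrategy := api.DeschedulerStrategy{
	Enabled: true,
	Params: api.StrategyParameters{
		NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
	},
}
nodePodCount := InitializeNodePodCount(nodes)
RemovePodsViolatingNodeAffinity(ds, nodeAffinityStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
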
184 pkg/descheduler/strategies/node_affinity_test.go Normal file
@@ -0,0 +1,184 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
	"testing"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/api"
	"github.com/kubernetes-incubator/descheduler/test"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func TestRemovePodsViolatingNodeAffinity(t *testing.T) {

	requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: api.StrategyParameters{
			NodeAffinityType: []string{
				"requiredDuringSchedulingIgnoredDuringExecution",
			},
		},
	}

	nodeLabelKey := "kubernetes.io/desiredNode"
	nodeLabelValue := "yes"
	nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10)
	nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue

	nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10)

	unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10)
	unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
	unschedulableNodeWithLabels.Spec.Unschedulable = true

	addPodsToNode := func(node *v1.Node) []v1.Pod {
		podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name)
		podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      nodeLabelKey,
									Operator: "In",
									Values: []string{
										nodeLabelValue,
									},
								},
							},
						},
					},
				},
			},
		}

		pod1 := test.BuildTestPod("pod1", 100, 0, node.Name)
		pod2 := test.BuildTestPod("pod2", 100, 0, node.Name)

		podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
		pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
		pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()

		return []v1.Pod{
			*podWithNodeAffinity,
			*pod1,
			*pod2,
		}
	}

	tests := []struct {
		description             string
		nodes                   []*v1.Node
		pods                    []v1.Pod
		strategy                api.DeschedulerStrategy
		expectedEvictedPodCount int
		npe                     nodePodEvictedCount
		maxPodsToEvict          int
	}{
		{
			description: "Strategy disabled, should not evict any pods",
			strategy: api.DeschedulerStrategy{
				Enabled: false,
				Params: api.StrategyParameters{
					NodeAffinityType: []string{
						"requiredDuringSchedulingIgnoredDuringExecution",
					},
				},
			},
			expectedEvictedPodCount: 0,
			pods:           addPodsToNode(nodeWithoutLabels),
			nodes:          []*v1.Node{nodeWithoutLabels, nodeWithLabels},
			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
			maxPodsToEvict: 0,
		},
		{
			description: "Invalid strategy type, should not evict any pods",
			strategy: api.DeschedulerStrategy{
				Enabled: true,
				Params: api.StrategyParameters{
					NodeAffinityType: []string{
						"requiredDuringSchedulingRequiredDuringExecution",
					},
				},
			},
			expectedEvictedPodCount: 0,
			pods:           addPodsToNode(nodeWithoutLabels),
			nodes:          []*v1.Node{nodeWithoutLabels, nodeWithLabels},
			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
			maxPodsToEvict: 0,
		},
		{
			description:             "Pod is correctly scheduled on node, no eviction expected",
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
			expectedEvictedPodCount: 0,
			pods:           addPodsToNode(nodeWithLabels),
			nodes:          []*v1.Node{nodeWithLabels},
			npe:            nodePodEvictedCount{nodeWithLabels: 0},
			maxPodsToEvict: 0,
		},
		{
			description:             "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
			expectedEvictedPodCount: 1,
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
			pods:           addPodsToNode(nodeWithoutLabels),
			nodes:          []*v1.Node{nodeWithoutLabels, nodeWithLabels},
			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
			maxPodsToEvict: 1,
		},
		{
			description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 0, should not be evicted",
			expectedEvictedPodCount: 0,
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
			pods:           addPodsToNode(nodeWithoutLabels),
			nodes:          []*v1.Node{nodeWithoutLabels, nodeWithLabels},
			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
			maxPodsToEvict: 0,
		},
		{
			description:             "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
			expectedEvictedPodCount: 0,
			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
			pods:           addPodsToNode(nodeWithoutLabels),
			nodes:          []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
			maxPodsToEvict: 0,
		},
	}

	for _, tc := range tests {

		fakeClient := &fake.Clientset{}
		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.PodList{Items: tc.pods}, nil
		})

		ds := options.DeschedulerServer{
			Client: fakeClient,
		}

		actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
		if actualEvictedPodCount != tc.expectedEvictedPodCount {
			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
		}
	}
}
@@ -31,15 +31,15 @@ import (
)

// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
	if !strategy.Enabled {
		return
	}
	removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun)
	removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
}

// removePodsWithAffinityRules evicts pods on the node which have pod anti-affinity rules.
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
	podsEvicted := 0
	for _, node := range nodes {
		glog.V(1).Infof("Processing node: %#v\n", node.Name)
@@ -49,12 +49,15 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
		}
		totalPods := len(pods)
		for i := 0; i < totalPods; i++ {
			if nodePodCount[node]+1 > maxPodsToEvict {
				break
			}
			if checkPodsWithAntiAffinityExist(pods[i], pods) {
				success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
				if !success {
					glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
				} else {
					podsEvicted++
					nodePodCount[node]++
					glog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
					// Since the current pod is evicted all other pods which have anti-affinity with this
					// pod need not be evicted.
@@ -65,6 +68,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
				}
			}
		}
		podsEvicted += nodePodCount[node]
	}
	return podsEvicted
}

@@ -79,8 +79,15 @@ func TestPodAntiAffinity(t *testing.T) {
	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
		return true, node, nil
	})
	expectedEvictedPodCount := 1
	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false)
	npe := nodePodEvictedCount{}
	npe[node] = 0
	expectedEvictedPodCount := 0
	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
	if podsEvicted != expectedEvictedPodCount {
		t.Errorf("Unexpected no of pods evicted")
	}
	expectedEvictedPodCount = 1
	podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
	if podsEvicted != expectedEvictedPodCount {
		t.Errorf("Unexpected no of pods evicted")
	}

37 pkg/descheduler/strategies/util.go Normal file
@@ -0,0 +1,37 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package strategies

import (
	"k8s.io/api/core/v1"
)

// This file contains the data structures, types & functions needed by all the strategies so that we don't have
// to compute them again in each strategy.

// nodePodEvictedCount keeps count of pods evicted on node. This is used in conjunction with strategies to
// enforce the per-node eviction limit (MaxNoOfPodsToEvictPerNode).
type nodePodEvictedCount map[*v1.Node]int

// InitializeNodePodCount initializes the nodePodCount.
func InitializeNodePodCount(nodeList []*v1.Node) nodePodEvictedCount {
	var nodePodCount = make(nodePodEvictedCount)
	for _, node := range nodeList {
		// Initialize podsEvicted till now with 0.
		nodePodCount[node] = 0
	}
	return nodePodCount
}
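Every strategy touched by this change set now follows the same cap pattern: initialize the counter once per run, thread it through the strategy, and stop evicting from a node once another eviction would exceed ds.MaxNoOfPodsToEvictPerNode. A condensed sketch of that shared flow — candidatePodsOn stands in for each strategy's own pod-listing logic, and client, dryRun, evictionPolicyGroupVersion and maxPodsToEvict are assumed from the surrounding run:

nodePodCount := InitializeNodePodCount(nodes)
for _, node := range nodes {
	for _, pod := range candidatePodsOn(node) { // hypothetical helper, not in this diff
		if nodePodCount[node]+1 > maxPodsToEvict {
			break // per-node cap reached, move on to the next node
		}
		if success, _ := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun); success {
			nodePodCount[node]++
		}
	}
}
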
94 pkg/utils/predicates.go Normal file
@@ -0,0 +1,94 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"fmt"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

// The following code has been copied from the predicates package to avoid the
// huge vendoring issues, mostly copied from
// k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/
// Some minor changes have been made to ease the imports, but most of the code
// remains untouched.

// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) (bool, error) {
	if node == nil {
		return false, fmt.Errorf("node not found")
	}
	if podMatchesNodeLabels(pod, node) {
		return true, nil
	}
	return false, nil
}

// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
	// Check if node.Labels match pod.Spec.NodeSelector.
	if len(pod.Spec.NodeSelector) > 0 {
		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(node.Labels)) {
			return false
		}
	}

	// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
	// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
	// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
	// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
	// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
	// 6. non-nil empty NodeSelectorRequirement is not allowed

	affinity := pod.Spec.Affinity
	if affinity != nil && affinity.NodeAffinity != nil {
		nodeAffinity := affinity.NodeAffinity
		// if no required NodeAffinity requirements, will do no-op, means select all nodes.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			return true
		}

		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
			glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
			return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		}
	}
	return true
}

// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
	for _, req := range nodeSelectorTerms {
		nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
		if err != nil {
			glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
			return false
		}
		if nodeSelector.Matches(labels.Set(node.Labels)) {
			return true
		}
	}
	return false
}
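The copied predicate is what lets the strategies decide whether an already-running pod still belongs where it is. A small usage sketch (pod and node are assumed to be an existing *v1.Pod and *v1.Node; nothing here beyond PodMatchNodeSelector itself comes from this file):

// Returns true when the node's labels satisfy both pod.Spec.NodeSelector and the pod's
// required-during-scheduling node affinity; an error is returned only for a nil node.
fits, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
	glog.Errorf("predicate error: %v", err)
} else if !fits {
	glog.V(1).Infof("pod %v no longer satisfies the node selector/affinity of node %v", pod.Name, node.Name)
}
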
156 test/e2e/e2e_test.go Normal file
@@ -0,0 +1,156 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"github.com/golang/glog"
	"testing"
	"time"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	deschedulerapi "github.com/kubernetes-incubator/descheduler/pkg/api"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/client"
	eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
	nodeutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/node"
	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/strategies"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/api/testapi"
)

func MakePodSpec() v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{{
			Name:  "pause",
			Image: "kubernetes/pause",
			Ports: []v1.ContainerPort{{ContainerPort: 80}},
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("500Mi"),
				},
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("500Mi"),
				},
			},
		}},
	}
}

// RcByNameContainer returns a ReplicationController with the specified name and container.
func RcByNameContainer(name string, replicas int32, labels map[string]string, gracePeriod *int64) *v1.ReplicationController {

	zeroGracePeriod := int64(0)

	// Add "name": name to the labels, overwriting if it exists.
	labels["name"] = name
	if gracePeriod == nil {
		gracePeriod = &zeroGracePeriod
	}
	return &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicationController",
			APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int32) *int32 { return &i }(replicas),
			Selector: map[string]string{
				"name": name,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: MakePodSpec(),
			},
		},
	}
}

// startEndToEndForLowNodeUtilization tests the low node utilization strategy.
func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
	var thresholds = make(deschedulerapi.ResourceThresholds)
	var targetThresholds = make(deschedulerapi.ResourceThresholds)
	thresholds[v1.ResourceMemory] = 20
	thresholds[v1.ResourcePods] = 20
	thresholds[v1.ResourceCPU] = 85
	targetThresholds[v1.ResourceMemory] = 20
	targetThresholds[v1.ResourcePods] = 20
	targetThresholds[v1.ResourceCPU] = 90
	// Run descheduler.
	evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
	if err != nil || len(evictionPolicyGroupVersion) == 0 {
		glog.Fatalf("%v", err)
	}
	stopChannel := make(chan struct{})
	nodes, err := nodeutil.ReadyNodes(clientset, "", stopChannel)
	if err != nil {
		glog.Fatalf("%v", err)
	}
	nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds}
	nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}
	lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams}
	ds := &options.DeschedulerServer{Client: clientset}
	nodePodCount := strategies.InitializeNodePodCount(nodes)
	strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
	time.Sleep(10 * time.Second)

	return
}

func TestE2E(t *testing.T) {
	// If we have reached here, it means the cluster has already been set up and the kubeconfig file should
	// be in the /tmp directory.
	clientSet, err := client.CreateClient("/tmp/admin.conf")
	if err != nil {
		t.Errorf("Error during client creation with %v", err)
	}
	nodeList, err := clientSet.Core().Nodes().List(metav1.ListOptions{})
	if err != nil {
		t.Errorf("Error listing node with %v", err)
	}
	// Assumption: We would have a 3 node cluster by now. Kubeadm brings all the master components onto the master node.
	// So, the last node would have the least utilization.
	leastLoadedNode := nodeList.Items[2]
	rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
	if err != nil {
		t.Errorf("Error creating RC %v", err)
	}
	podsOnleastUtilizedNode, err := podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
	if err != nil {
		t.Errorf("Error listing pods on a node %v", err)
	}
	podsBefore := len(podsOnleastUtilizedNode)
	t.Log("Eviction of pods starting")
	startEndToEndForLowNodeUtilization(clientSet)
	podsOnleastUtilizedNode, err = podutil.ListPodsOnANode(clientSet, &leastLoadedNode)
	if err != nil {
		t.Errorf("Error listing pods on a node %v", err)
	}
	podsAfter := len(podsOnleastUtilizedNode)
	if podsBefore > podsAfter {
		t.Fatalf("We should have seen more pods on this node as per kubeadm's way of installing %v, %v", podsBefore, podsAfter)
	}
}
20 test/run-e2e-tests.sh Executable file
@@ -0,0 +1,20 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#!/bin/bash

# This just runs the e2e tests.
PRJ_PREFIX="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
go test ${PRJ_PREFIX}/test/e2e/ -v

@@ -14,6 +14,7 @@

#!/bin/bash

# run unit tests
go test $(go list github.com/kubernetes-incubator/descheduler/... | grep -v github.com/kubernetes-incubator/descheduler/vendor/)
# This just runs the unit tests. Ignoring the current directory so as to avoid running the e2e tests.
PRJ_PREFIX="github.com/${REPO_ORG:-kubernetes-incubator}/descheduler"
go test $(go list ${PRJ_PREFIX}/... | grep -v ${PRJ_PREFIX}/vendor/| grep -v ${PRJ_PREFIX}/test/)