Mirror of https://github.com/kubernetes-sigs/descheduler.git
Synced 2026-01-26 21:31:18 +01:00

Compare commits

20 commits:

- fc1688057a
- e6e200b93c
- 5d843d1f08
- 6c981cc067
- 22a3a6ea1d
- 294bddb5e2
- 0a9d1959e2
- 19ee5d80b5
- 14d9e175c2
- 468e138070
- db13b2ac73
- 40ca53e0a5
- 35d8367fe5
- 345dd9cf27
- 81f471fe05
- aa5e8770f5
- 2690d139c5
- cd192ce5fc
- 048f3fd1e5
- a079fd2757
CONTRIBUTING.md (new file, 23 lines)

```diff
@@ -0,0 +1,23 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+
+## Contact Information
+
+- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
+- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
```
README.md (23 changed lines)

```diff
@@ -138,11 +138,7 @@ spec:
         volumeMounts:
         - mountPath: /policy-dir
           name: policy-volume
-        command:
-        - "/bin/sh"
-        - "-ec"
-        - |
-          /bin/descheduler --policy-config-file /policy-dir/policy.yaml
+        command: ["/bin/descheduler", "--policy-config-file", "/policy-dir/policy.yaml"]
        restartPolicy: "Never"
        serviceAccountName: descheduler-sa
        volumes:
@@ -285,11 +281,18 @@ This roadmap is not in any particular order.
 
 Descheduler | supported Kubernetes version
 -------------|-----------------------------
-0.4 | 1.9+
+0.4+ | 1.9+
 0.1-0.3 | 1.7-1.8
 
-## Note
+## Community, discussion, contribution, and support
 
-This project is under active development, and is not intended for production use.
-Any api could be changed any time with out any notice. That said, your feedback is
-very important and appreciated to make this project more stable and useful.
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+
+- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
+- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
```
```diff
@@ -57,4 +57,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
     fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
     // max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
     fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
+    // evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
+    fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
 }
```
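For reference, a minimal, self-contained sketch of what the two flags above do once registered. The flag and field names come from the diff; the trimmed-down DeschedulerServer struct and the hard-coded argument list are illustrative assumptions, not code from the repository:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// DeschedulerServer here holds only the two options touched by this diff;
// the real struct has more fields.
type DeschedulerServer struct {
	MaxNoOfPodsToEvictPerNode int
	EvictLocalStoragePods     bool
}

func main() {
	// Zero values match the documented defaults: no eviction of
	// local-storage pods unless the flag is passed.
	rs := &DeschedulerServer{}
	fs := pflag.NewFlagSet("descheduler", pflag.ExitOnError)
	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
	fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
	_ = fs.Parse([]string{"--evict-local-storage-pods", "--max-pods-to-evict-per-node=2"})
	fmt.Println(rs.EvictLocalStoragePods, rs.MaxNoOfPodsToEvictPerNode) // true 2
}
```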
```diff
@@ -10,7 +10,7 @@ master_uuid=$(uuid)
 node1_uuid=$(uuid)
 node2_uuid=$(uuid)
 kube_apiserver_port=6443
-kube_version=1.11.1
+kube_version=1.13.1
 
 DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
 E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
@@ -36,10 +36,10 @@ create_cluster() {
 
 generate_kubeadm_instance_files() {
     # TODO: Check if they have come up. awk $6 contains the state(RUNNING or not).
     master_public_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $5}')
     master_private_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $4}')
     node1_public_ip=$(gcloud compute instances list | grep $node1_uuid|awk '{print $5}')
     node2_public_ip=$(gcloud compute instances list | grep $node2_uuid|awk '{print $5}')
-    echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_public_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
+    echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_private_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
 }
 
```
kubernetes/configmap.yaml (new file, 27 lines)

```diff
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: descheduler-policy-configmap
+  namespace: kube-system
+data:
+  policy.yaml: |
+    apiVersion: "descheduler/v1alpha1"
+    kind: "DeschedulerPolicy"
+    strategies:
+      "RemoveDuplicates":
+        enabled: true
+      "RemovePodsViolatingInterPodAntiAffinity":
+        enabled: true
+      "LowNodeUtilization":
+        enabled: true
+        params:
+          nodeResourceUtilizationThresholds:
+            thresholds:
+              "cpu" : 20
+              "memory": 20
+              "pods": 20
+            targetThresholds:
+              "cpu" : 50
+              "memory": 50
+              "pods": 50
+
```
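A rough sketch of how these percentages are interpreted, as a simplification under stated assumptions rather than the strategy's actual code (which appears later in this compare view and operates on api.ResourceThresholds): a node counts as underutilized only while every tracked usage figure stays below `thresholds`, and becomes an eviction source once any figure crosses `targetThresholds`.

```go
package main

import "fmt"

// ResourceThresholds mirrors the percentage map in the policy above.
type ResourceThresholds map[string]float64

// underutilized: every tracked resource sits below its threshold.
func underutilized(usage, thresholds ResourceThresholds) bool {
	for name, limit := range thresholds {
		if usage[name] >= limit {
			return false
		}
	}
	return true
}

// overutilized: at least one resource exceeds its target threshold.
func overutilized(usage, targets ResourceThresholds) bool {
	for name, limit := range targets {
		if usage[name] > limit {
			return true
		}
	}
	return false
}

func main() {
	thresholds := ResourceThresholds{"cpu": 20, "memory": 20, "pods": 20}
	targets := ResourceThresholds{"cpu": 50, "memory": 50, "pods": 50}
	usage := ResourceThresholds{"cpu": 10, "memory": 15, "pods": 5}
	fmt.Println(underutilized(usage, thresholds)) // true: can receive pods
	fmt.Println(overutilized(usage, targets))     // false: not an eviction source
}
```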
kubernetes/job.yaml (new file, 33 lines)

```diff
@@ -0,0 +1,33 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: descheduler-job
+  namespace: kube-system
+spec:
+  parallelism: 1
+  completions: 1
+  template:
+    metadata:
+      name: descheduler-pod
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ""
+    spec:
+      containers:
+      - name: descheduler
+        image: descheduler:latest
+        volumeMounts:
+        - mountPath: /policy-dir
+          name: policy-volume
+        command:
+        - "/bin/descheduler"
+        args:
+        - "--policy-config-file"
+        - "/policy-dir/policy.yaml"
+        - "--v"
+        - "3"
+      restartPolicy: "Never"
+      serviceAccountName: descheduler-sa
+      volumes:
+      - name: policy-volume
+        configMap:
+          name: descheduler-policy-configmap
```
kubernetes/rbac.yaml (new file, 37 lines)

```diff
@@ -0,0 +1,37 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: descheduler-cluster-role
+  namespace: kube-system
+rules:
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["get", "watch", "list"]
+- apiGroups: [""]
+  resources: ["pods"]
+  verbs: ["get", "watch", "list", "delete"]
+- apiGroups: [""]
+  resources: ["pods/eviction"]
+  verbs: ["create"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: descheduler-sa
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: descehduler-cluster-role-binding
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: descheduler-cluster-role
+subjects:
+- name: descheduler-sa
+  kind: ServiceAccount
+  namespace: kube-system
+
```
```diff
@@ -77,16 +77,16 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
     } else {
         yysep2 := !z.EncBinary()
         yy2arr2 := z.EncBasicHandle().StructToArray
-        var yyq2 [8]bool
+        var yyq2 [9]bool
         _ = yyq2
         _, _ = yysep2, yy2arr2
         const yyr2 bool = false
         yyq2[0] = x.Kind != ""
         yyq2[1] = x.APIVersion != ""
         if yyr2 || yy2arr2 {
-            r.WriteArrayStart(8)
+            r.WriteArrayStart(9)
         } else {
-            var yynn2 = 6
+            var yynn2 = 7
             for _, b := range yyq2 {
                 if b {
                     yynn2++
@@ -263,6 +263,25 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
                 r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
             }
         }
+        if yyr2 || yy2arr2 {
+            r.WriteArrayElem()
+            yym28 := z.EncBinary()
+            _ = yym28
+            if false {
+            } else {
+                r.EncodeBool(bool(x.EvictLocalStoragePods))
+            }
+        } else {
+            r.WriteMapElemKey()
+            r.EncStructFieldKey(codecSelferValueTypeString1234, `EvictLocalStoragePods`)
+            r.WriteMapElemValue()
+            yym29 := z.EncBinary()
+            _ = yym29
+            if false {
+            } else {
+                r.EncodeBool(bool(x.EvictLocalStoragePods))
+            }
+        }
         if yyr2 || yy2arr2 {
             r.WriteArrayEnd()
         } else {
@@ -420,6 +439,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
                 *((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
             }
         }
+    case "EvictLocalStoragePods":
+        if r.TryDecodeAsNil() {
+            x.EvictLocalStoragePods = false
+        } else {
+            yyv20 := &x.EvictLocalStoragePods
+            yym21 := z.DecBinary()
+            _ = yym21
+            if false {
+            } else {
+                *((*bool)(yyv20)) = r.DecodeBool()
+            }
+        }
    default:
        z.DecStructFieldNotFound(-1, yys3)
    } // end switch yys3
@@ -431,16 +462,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     var h codecSelfer1234
     z, r := codec1978.GenHelperDecoder(d)
     _, _, _ = h, z, r
-    var yyj20 int
-    var yyb20 bool
-    var yyhl20 bool = l >= 0
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    var yyj22 int
+    var yyb22 bool
+    var yyhl22 bool = l >= 0
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -448,29 +479,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.Kind = ""
     } else {
-        yyv21 := &x.Kind
-        yym22 := z.DecBinary()
-        _ = yym22
-        if false {
-        } else {
-            *((*string)(yyv21)) = r.DecodeString()
-        }
-    }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
-    } else {
-        yyb20 = r.CheckBreak()
-    }
-    if yyb20 {
-        r.ReadArrayEnd()
-        return
-    }
-    r.ReadArrayElem()
-    if r.TryDecodeAsNil() {
-        x.APIVersion = ""
-    } else {
-        yyv23 := &x.APIVersion
+        yyv23 := &x.Kind
         yym24 := z.DecBinary()
         _ = yym24
         if false {
@@ -478,13 +487,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
            *((*string)(yyv23)) = r.DecodeString()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
+    r.ReadArrayElem()
+    if r.TryDecodeAsNil() {
+        x.APIVersion = ""
+    } else {
+        yyv25 := &x.APIVersion
+        yym26 := z.DecBinary()
+        _ = yym26
+        if false {
+        } else {
+            *((*string)(yyv25)) = r.DecodeString()
+        }
+    }
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
+    } else {
+        yyb22 = r.CheckBreak()
+    }
+    if yyb22 {
+        r.ReadArrayEnd()
+        return
+    }
@@ -492,23 +523,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.DeschedulingInterval = 0
     } else {
-        yyv25 := &x.DeschedulingInterval
-        yym26 := z.DecBinary()
-        _ = yym26
+        yyv27 := &x.DeschedulingInterval
+        yym28 := z.DecBinary()
+        _ = yym28
         if false {
-        } else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
-            z.DecExtension(yyv25, yyxt26)
+        } else if yyxt28 := z.Extension(z.I2Rtid(yyv27)); yyxt28 != nil {
+            z.DecExtension(yyv27, yyxt28)
         } else {
-            *((*int64)(yyv25)) = int64(r.DecodeInt(64))
+            *((*int64)(yyv27)) = int64(r.DecodeInt(64))
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -516,29 +547,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.KubeconfigFile = ""
     } else {
-        yyv27 := &x.KubeconfigFile
-        yym28 := z.DecBinary()
-        _ = yym28
-        if false {
-        } else {
-            *((*string)(yyv27)) = r.DecodeString()
-        }
-    }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
-    } else {
-        yyb20 = r.CheckBreak()
-    }
-    if yyb20 {
-        r.ReadArrayEnd()
-        return
-    }
-    r.ReadArrayElem()
-    if r.TryDecodeAsNil() {
-        x.PolicyConfigFile = ""
-    } else {
-        yyv29 := &x.PolicyConfigFile
+        yyv29 := &x.KubeconfigFile
         yym30 := z.DecBinary()
         _ = yym30
         if false {
@@ -546,13 +555,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
            *((*string)(yyv29)) = r.DecodeString()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
+    r.ReadArrayElem()
+    if r.TryDecodeAsNil() {
+        x.PolicyConfigFile = ""
+    } else {
+        yyv31 := &x.PolicyConfigFile
+        yym32 := z.DecBinary()
+        _ = yym32
+        if false {
+        } else {
+            *((*string)(yyv31)) = r.DecodeString()
+        }
+    }
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
+    } else {
+        yyb22 = r.CheckBreak()
+    }
+    if yyb22 {
+        r.ReadArrayEnd()
+        return
+    }
@@ -560,21 +591,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.DryRun = false
     } else {
-        yyv31 := &x.DryRun
-        yym32 := z.DecBinary()
-        _ = yym32
+        yyv33 := &x.DryRun
+        yym34 := z.DecBinary()
+        _ = yym34
         if false {
         } else {
-            *((*bool)(yyv31)) = r.DecodeBool()
+            *((*bool)(yyv33)) = r.DecodeBool()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -582,21 +613,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.NodeSelector = ""
     } else {
-        yyv33 := &x.NodeSelector
-        yym34 := z.DecBinary()
-        _ = yym34
+        yyv35 := &x.NodeSelector
+        yym36 := z.DecBinary()
+        _ = yym36
         if false {
         } else {
-            *((*string)(yyv33)) = r.DecodeString()
+            *((*string)(yyv35)) = r.DecodeString()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -604,26 +635,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.MaxNoOfPodsToEvictPerNode = 0
     } else {
-        yyv35 := &x.MaxNoOfPodsToEvictPerNode
-        yym36 := z.DecBinary()
-        _ = yym36
+        yyv37 := &x.MaxNoOfPodsToEvictPerNode
+        yym38 := z.DecBinary()
+        _ = yym38
         if false {
         } else {
-            *((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
+            *((*int)(yyv37)) = int(r.DecodeInt(codecSelferBitsize1234))
         }
     }
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
+    } else {
+        yyb22 = r.CheckBreak()
+    }
+    if yyb22 {
+        r.ReadArrayEnd()
+        return
+    }
+    r.ReadArrayElem()
+    if r.TryDecodeAsNil() {
+        x.EvictLocalStoragePods = false
+    } else {
+        yyv39 := &x.EvictLocalStoragePods
+        yym40 := z.DecBinary()
+        _ = yym40
+        if false {
+        } else {
+            *((*bool)(yyv39)) = r.DecodeBool()
+        }
+    }
     for {
-        yyj20++
-        if yyhl20 {
-            yyb20 = yyj20 > l
+        yyj22++
+        if yyhl22 {
+            yyb22 = yyj22 > l
         } else {
-            yyb20 = r.CheckBreak()
+            yyb22 = r.CheckBreak()
         }
-        if yyb20 {
+        if yyb22 {
             break
         }
         r.ReadArrayElem()
-        z.DecStructFieldNotFound(yyj20-1, "")
+        z.DecStructFieldNotFound(yyj22-1, "")
     }
     r.ReadArrayEnd()
 }
```
```diff
@@ -45,4 +45,7 @@ type DeschedulerConfiguration struct {
 
     // MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
     MaxNoOfPodsToEvictPerNode int
+
+    // EvictLocalStoragePods allows pods using local storage to be evicted.
+    EvictLocalStoragePods bool
 }
```
```diff
@@ -77,7 +77,7 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
     } else {
         yysep2 := !z.EncBinary()
         yy2arr2 := z.EncBasicHandle().StructToArray
-        var yyq2 [8]bool
+        var yyq2 [9]bool
         _ = yyq2
         _, _ = yysep2, yy2arr2
         const yyr2 bool = false
@@ -88,8 +88,9 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
         yyq2[5] = x.DryRun != false
         yyq2[6] = x.NodeSelector != ""
         yyq2[7] = x.MaxNoOfPodsToEvictPerNode != 0
+        yyq2[8] = x.EvictLocalStoragePods != false
         if yyr2 || yy2arr2 {
-            r.WriteArrayStart(8)
+            r.WriteArrayStart(9)
         } else {
             var yynn2 = 1
             for _, b := range yyq2 {
@@ -298,6 +299,31 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
                 }
             }
         }
+        if yyr2 || yy2arr2 {
+            r.WriteArrayElem()
+            if yyq2[8] {
+                yym28 := z.EncBinary()
+                _ = yym28
+                if false {
+                } else {
+                    r.EncodeBool(bool(x.EvictLocalStoragePods))
+                }
+            } else {
+                r.EncodeBool(false)
+            }
+        } else {
+            if yyq2[8] {
+                r.WriteMapElemKey()
+                r.EncStructFieldKey(codecSelferValueTypeString1234, `evictLocalStoragePods`)
+                r.WriteMapElemValue()
+                yym29 := z.EncBinary()
+                _ = yym29
+                if false {
+                } else {
+                    r.EncodeBool(bool(x.EvictLocalStoragePods))
+                }
+            }
+        }
         if yyr2 || yy2arr2 {
             r.WriteArrayEnd()
         } else {
@@ -455,6 +481,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
                 *((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
             }
         }
+    case "evictLocalStoragePods":
+        if r.TryDecodeAsNil() {
+            x.EvictLocalStoragePods = false
+        } else {
+            yyv20 := &x.EvictLocalStoragePods
+            yym21 := z.DecBinary()
+            _ = yym21
+            if false {
+            } else {
+                *((*bool)(yyv20)) = r.DecodeBool()
+            }
+        }
    default:
        z.DecStructFieldNotFound(-1, yys3)
    } // end switch yys3
@@ -466,16 +504,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     var h codecSelfer1234
     z, r := codec1978.GenHelperDecoder(d)
     _, _, _ = h, z, r
-    var yyj20 int
-    var yyb20 bool
-    var yyhl20 bool = l >= 0
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    var yyj22 int
+    var yyb22 bool
+    var yyhl22 bool = l >= 0
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -483,29 +521,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.Kind = ""
     } else {
-        yyv21 := &x.Kind
-        yym22 := z.DecBinary()
-        _ = yym22
-        if false {
-        } else {
-            *((*string)(yyv21)) = r.DecodeString()
-        }
-    }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
-    } else {
-        yyb20 = r.CheckBreak()
-    }
-    if yyb20 {
-        r.ReadArrayEnd()
-        return
-    }
-    r.ReadArrayElem()
-    if r.TryDecodeAsNil() {
-        x.APIVersion = ""
-    } else {
-        yyv23 := &x.APIVersion
+        yyv23 := &x.Kind
         yym24 := z.DecBinary()
         _ = yym24
         if false {
@@ -513,13 +529,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
            *((*string)(yyv23)) = r.DecodeString()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
+    r.ReadArrayElem()
+    if r.TryDecodeAsNil() {
+        x.APIVersion = ""
+    } else {
+        yyv25 := &x.APIVersion
+        yym26 := z.DecBinary()
+        _ = yym26
+        if false {
+        } else {
+            *((*string)(yyv25)) = r.DecodeString()
+        }
+    }
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
+    } else {
+        yyb22 = r.CheckBreak()
+    }
+    if yyb22 {
+        r.ReadArrayEnd()
+        return
+    }
@@ -527,23 +565,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.DeschedulingInterval = 0
     } else {
-        yyv25 := &x.DeschedulingInterval
-        yym26 := z.DecBinary()
-        _ = yym26
+        yyv27 := &x.DeschedulingInterval
+        yym28 := z.DecBinary()
+        _ = yym28
         if false {
-        } else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
-            z.DecExtension(yyv25, yyxt26)
+        } else if yyxt28 := z.Extension(z.I2Rtid(yyv27)); yyxt28 != nil {
+            z.DecExtension(yyv27, yyxt28)
         } else {
-            *((*int64)(yyv25)) = int64(r.DecodeInt(64))
+            *((*int64)(yyv27)) = int64(r.DecodeInt(64))
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -551,29 +589,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.KubeconfigFile = ""
     } else {
-        yyv27 := &x.KubeconfigFile
-        yym28 := z.DecBinary()
-        _ = yym28
-        if false {
-        } else {
-            *((*string)(yyv27)) = r.DecodeString()
-        }
-    }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
-    } else {
-        yyb20 = r.CheckBreak()
-    }
-    if yyb20 {
-        r.ReadArrayEnd()
-        return
-    }
-    r.ReadArrayElem()
-    if r.TryDecodeAsNil() {
-        x.PolicyConfigFile = ""
-    } else {
-        yyv29 := &x.PolicyConfigFile
+        yyv29 := &x.KubeconfigFile
         yym30 := z.DecBinary()
         _ = yym30
         if false {
@@ -581,13 +597,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
            *((*string)(yyv29)) = r.DecodeString()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
+    r.ReadArrayElem()
+    if r.TryDecodeAsNil() {
+        x.PolicyConfigFile = ""
+    } else {
+        yyv31 := &x.PolicyConfigFile
+        yym32 := z.DecBinary()
+        _ = yym32
+        if false {
+        } else {
+            *((*string)(yyv31)) = r.DecodeString()
+        }
+    }
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
+    } else {
+        yyb22 = r.CheckBreak()
+    }
+    if yyb22 {
+        r.ReadArrayEnd()
+        return
+    }
@@ -595,21 +633,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.DryRun = false
     } else {
-        yyv31 := &x.DryRun
-        yym32 := z.DecBinary()
-        _ = yym32
+        yyv33 := &x.DryRun
+        yym34 := z.DecBinary()
+        _ = yym34
         if false {
         } else {
-            *((*bool)(yyv31)) = r.DecodeBool()
+            *((*bool)(yyv33)) = r.DecodeBool()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -617,21 +655,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.NodeSelector = ""
     } else {
-        yyv33 := &x.NodeSelector
-        yym34 := z.DecBinary()
-        _ = yym34
+        yyv35 := &x.NodeSelector
+        yym36 := z.DecBinary()
+        _ = yym36
         if false {
         } else {
-            *((*string)(yyv33)) = r.DecodeString()
+            *((*string)(yyv35)) = r.DecodeString()
         }
     }
-    yyj20++
-    if yyhl20 {
-        yyb20 = yyj20 > l
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
     } else {
-        yyb20 = r.CheckBreak()
+        yyb22 = r.CheckBreak()
     }
-    if yyb20 {
+    if yyb22 {
         r.ReadArrayEnd()
         return
     }
@@ -639,26 +677,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
     if r.TryDecodeAsNil() {
         x.MaxNoOfPodsToEvictPerNode = 0
     } else {
-        yyv35 := &x.MaxNoOfPodsToEvictPerNode
-        yym36 := z.DecBinary()
-        _ = yym36
+        yyv37 := &x.MaxNoOfPodsToEvictPerNode
+        yym38 := z.DecBinary()
+        _ = yym38
         if false {
         } else {
-            *((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
+            *((*int)(yyv37)) = int(r.DecodeInt(codecSelferBitsize1234))
         }
     }
+    yyj22++
+    if yyhl22 {
+        yyb22 = yyj22 > l
+    } else {
+        yyb22 = r.CheckBreak()
+    }
+    if yyb22 {
+        r.ReadArrayEnd()
+        return
+    }
+    r.ReadArrayElem()
+    if r.TryDecodeAsNil() {
+        x.EvictLocalStoragePods = false
+    } else {
+        yyv39 := &x.EvictLocalStoragePods
+        yym40 := z.DecBinary()
+        _ = yym40
+        if false {
+        } else {
+            *((*bool)(yyv39)) = r.DecodeBool()
+        }
+    }
     for {
-        yyj20++
-        if yyhl20 {
-            yyb20 = yyj20 > l
+        yyj22++
+        if yyhl22 {
+            yyb22 = yyj22 > l
         } else {
-            yyb20 = r.CheckBreak()
+            yyb22 = r.CheckBreak()
        }
-        if yyb20 {
+        if yyb22 {
             break
         }
         r.ReadArrayElem()
-        z.DecStructFieldNotFound(yyj20-1, "")
+        z.DecStructFieldNotFound(yyj22-1, "")
     }
     r.ReadArrayEnd()
 }
```
```diff
@@ -45,4 +45,7 @@ type DeschedulerConfiguration struct {
 
     // MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
     MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
+
+    // EvictLocalStoragePods allows pods using local storage to be evicted.
+    EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
 }
```
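Since the json tags follow standard encoding/json semantics, a hand-written sketch (a hypothetical trimmed mirror of the struct, separate from the generated codec above) shows what omitempty means for the new field:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Only the two fields touched by this diff; the real v1alpha1
// DeschedulerConfiguration has more.
type DeschedulerConfiguration struct {
	MaxNoOfPodsToEvictPerNode int  `json:"maxNoOfPodsToEvictPerNode,omitempty"`
	EvictLocalStoragePods     bool `json:"evictLocalStoragePods,omitempty"`
}

func main() {
	var cfg DeschedulerConfiguration
	_ = json.Unmarshal([]byte(`{"evictLocalStoragePods": true}`), &cfg)
	fmt.Println(cfg.EvictLocalStoragePods) // true

	// With omitempty, zero values (false, 0) are dropped on encoding.
	out, _ := json.Marshal(DeschedulerConfiguration{})
	fmt.Println(string(out)) // {}
}
```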
```diff
@@ -48,6 +48,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration
     out.DryRun = in.DryRun
     out.NodeSelector = in.NodeSelector
     out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
+    out.EvictLocalStoragePods = in.EvictLocalStoragePods
     return nil
 }
 
@@ -63,6 +64,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration
     out.DryRun = in.DryRun
     out.NodeSelector = in.NodeSelector
     out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
+    out.EvictLocalStoragePods = in.EvictLocalStoragePods
     return nil
 }
 
```
```diff
@@ -52,23 +52,23 @@ func IsLatencySensitivePod(pod *v1.Pod) bool {
 }
 
 // IsEvictable checks if a pod is evictable or not.
-func IsEvictable(pod *v1.Pod) bool {
+func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
     ownerRefList := OwnerRef(pod)
-    if IsMirrorPod(pod) || IsPodWithLocalStorage(pod) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod) {
+    if IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod) {
         return false
     }
     return true
 }
 
 // ListEvictablePodsOnNode returns the list of evictable pods on node.
-func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
     pods, err := ListPodsOnANode(client, node)
     if err != nil {
         return []*v1.Pod{}, err
     }
     evictablePods := make([]*v1.Pod, 0)
     for _, pod := range pods {
-        if !IsEvictable(pod) {
+        if !IsEvictable(pod, evictLocalStoragePods) {
             continue
         } else {
             evictablePods = append(evictablePods, pod)
```
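Restated in isolation, the new eviction gate behaves as below. This is a sketch with stand-in types; the real helpers (IsMirrorPod, IsPodWithLocalStorage, IsDaemonsetPod, IsCriticalPod) operate on *v1.Pod and owner references:

```go
package main

import "fmt"

// Pod is a stand-in for *v1.Pod; each field replaces one of the real
// helper predicates from the diff above.
type Pod struct {
	Mirror, LocalStorage, Daemonset, Critical bool
	OwnerRefs                                 int
}

// isEvictable mirrors the updated condition: local storage only blocks
// eviction when the evictLocalStoragePods flag is left at false.
func isEvictable(pod Pod, evictLocalStoragePods bool) bool {
	if pod.Mirror || (!evictLocalStoragePods && pod.LocalStorage) ||
		pod.OwnerRefs == 0 || pod.Daemonset || pod.Critical {
		return false
	}
	return true
}

func main() {
	p := Pod{LocalStorage: true, OwnerRefs: 1}
	fmt.Println(isEvictable(p, false)) // false: local storage blocks eviction by default
	fmt.Println(isEvictable(p, true))  // true: the opt-in flag lifts the restriction
}
```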
```diff
@@ -40,15 +40,15 @@ func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, ...
     if !strategy.Enabled {
         return
     }
-    deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
+    deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
 }
 
 // deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
-func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
+func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
     podsEvicted := 0
     for _, node := range nodes {
         glog.V(1).Infof("Processing node: %#v", node.Name)
-        dpm := ListDuplicatePodsOnANode(client, node)
+        dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
         for creator, pods := range dpm {
             if len(pods) > 1 {
                 glog.V(1).Infof("%#v", creator)
@@ -73,8 +73,8 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, ...
 }
 
 // ListDuplicatePodsOnANode lists duplicate pods on a given node.
-func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
-    pods, err := podutil.ListEvictablePodsOnNode(client, node)
+func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
+    pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
     if err != nil {
         return nil
     }
```
```diff
@@ -77,7 +77,7 @@ func TestFindDuplicatePods(t *testing.T) {
     })
     npe := nodePodEvictedCount{}
     npe[node] = 0
-    podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
+    podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2, false)
     if podsEvicted != expectedEvictedPodCount {
         t.Errorf("Unexpected no of pods evicted")
     }
```
```diff
@@ -61,7 +61,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, ...
     }
 
     npm := createNodePodsMap(ds.Client, nodes)
-    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
 
     glog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
         thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
@@ -131,10 +131,10 @@ func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
 
 // classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
 // low and high thresholds, it is simply ignored.
-func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
+func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
     lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
     for node, pods := range npm {
-        usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
+        usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods, evictLocalStoragePods)
         nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}
 
         // Check if node is underutilized and if we can schedule pods on it.
@@ -351,7 +351,7 @@ func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds api.ResourceThresholds) bool {
 }
 
 // Nodeutilization returns the current usage of node.
-func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
+func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
     bePods := []*v1.Pod{}
     nonRemovablePods := []*v1.Pod{}
     bPods := []*v1.Pod{}
@@ -359,7 +359,7 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*v1.Pod, ...
     totalReqs := map[v1.ResourceName]resource.Quantity{}
     for _, pod := range pods {
         // We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
-        if !podutil.IsEvictable(pod) {
+        if !podutil.IsEvictable(pod, evictLocalStoragePods) {
             nonRemovablePods = append(nonRemovablePods, pod)
             if podutil.IsBestEffortPod(pod) {
                 continue
```
```diff
@@ -112,7 +112,7 @@ func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
     })
     expectedPodsEvicted := 3
     npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
-    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
     if len(lowNodes) != 1 {
         t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
     }
@@ -217,7 +217,7 @@ func TestLowNodeUtilizationWithPriorities(t *testing.T) {
     })
     expectedPodsEvicted := 3
     npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
-    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
     if len(lowNodes) != 1 {
         t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
     }
```
```diff
@@ -267,9 +267,6 @@ func TestSortPodsByPriority(t *testing.T) {
     podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
 
     sortPodsBasedOnPriority(podList)
-    for _, pod := range podList {
-        fmt.Println(pod)
-    }
     if !reflect.DeepEqual(podList[len(podList)-1], p4) {
         t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
     }
```
```diff
@@ -27,10 +27,10 @@ import (
 )
 
 func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
-    removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
+    removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
 }
 
-func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
+func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
     evictedPodCount := 0
     if !strategy.Enabled {
         return evictedPodCount
@@ -44,7 +44,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, ...
     for _, node := range nodes {
         glog.V(1).Infof("Processing node: %#v\n", node.Name)
 
-        pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node)
+        pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
         if err != nil {
             glog.Errorf("failed to get pods from %v: %v", node.Name, err)
         }
@@ -176,7 +176,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
         Client: fakeClient,
     }
 
-    actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
+    actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict, false)
     if actualEvictedPodCount != tc.expectedEvictedPodCount {
         t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
     }
```
```diff
@@ -35,15 +35,15 @@ func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, ...
     if !strategy.Enabled {
         return
     }
-    removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
+    removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
 }
 
 // removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
-func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
+func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
     podsEvicted := 0
     for _, node := range nodes {
         glog.V(1).Infof("Processing node: %#v\n", node.Name)
-        pods, err := podutil.ListEvictablePodsOnNode(client, node)
+        pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
         if err != nil {
             return 0
         }
@@ -55,13 +55,13 @@ func TestPodAntiAffinity(t *testing.T) {
     npe := nodePodEvictedCount{}
     npe[node] = 0
     expectedEvictedPodCount := 3
-    podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
+    podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
     if podsEvicted != expectedEvictedPodCount {
         t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
     }
     npe[node] = 0
     expectedEvictedPodCount = 1
-    podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
+    podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false)
     if podsEvicted != expectedEvictedPodCount {
         t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
     }
```
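The two test cases above imply that a maxPodsToEvict of 0 means no per-node cap (all 3 candidates are evicted) while 1 stops after a single eviction. A minimal sketch of that inferred cap logic, not the strategy's exact loop:

```go
package main

import "fmt"

// evictUpTo applies the per-node cap the tests exercise: a cap of 0 is
// treated as "no limit", matching maxPodsToEvict=0 evicting all 3 pods.
func evictUpTo(candidates, maxPodsToEvict int) int {
	evicted := 0
	for i := 0; i < candidates; i++ {
		if maxPodsToEvict > 0 && evicted+1 > maxPodsToEvict {
			break // cap reached for this node
		}
		evicted++
	}
	return evicted
}

func main() {
	fmt.Println(evictUpTo(3, 0)) // 3: no cap
	fmt.Println(evictUpTo(3, 1)) // 1: cap of one eviction per node
}
```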