mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


26 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
fc1688057a Merge pull request #142 from adyada/master
Added support to evict pods that use local storage.
2019-04-12 07:42:46 -07:00
Adi Yadav
e6e200b93c Added auto-generated changes. 2019-04-11 23:24:56 -04:00
Adi Yadav
5d843d1f08 Added manual changes. 2019-04-11 23:24:20 -04:00
Kubernetes Prow Robot
6c981cc067 Merge pull request #137 from pickledrick/fix/update-installation-readme
update readme to fix unused path
2019-03-04 06:04:39 -08:00
Peter Grant
22a3a6ea1d update readme to fix unused path 2019-03-04 15:16:37 +11:00
Kubernetes Prow Robot
294bddb5e2 Merge pull request #135 from russiancook/patch-1
Update configmap.yaml
2019-02-28 13:34:37 -08:00
russiancook
0a9d1959e2 Update configmap.yaml
Put configmap in kube-system namespace
2019-02-14 12:12:47 +02:00
RaviSantosh Gudimetla
19ee5d80b5 Merge pull request #124 from ravisantoshgudimetla/fix-kubeadm
Run apiserver on private ip and bump kube version installed to 1.13
2019-01-14 22:41:26 -05:00
ravisantoshgudimetla
14d9e175c2 Run apiserver on private ip and bump kube version installed to 1.13 2018-12-14 14:54:42 -05:00
RaviSantosh Gudimetla
468e138070 Merge pull request #114 from nikhita/contributing.md
Add CONTRIBUTING.md
2018-09-03 09:15:37 -04:00
Nikhita Raghunath
db13b2ac73 Add CONTRIBUTING.md 2018-09-01 19:29:24 +05:30
Avesh Agarwal
40ca53e0a5 Merge pull request #113 from sanity-io/master
Yaml files for kubernetes
2018-08-24 08:50:41 -04:00
nicholas.klem
35d8367fe5 rbac.authorization.k8s.io/v1 - not v1beta1 2018-08-24 14:44:19 +02:00
nicholas.klem
345dd9cf27 add kubernetes yaml files 2018-08-24 14:34:38 +02:00
RaviSantosh Gudimetla
81f471fe05 Merge pull request #111 from kubernetes-incubator/ravisantoshgudimetla-patch-2
Remove production usage warning
2018-08-23 15:57:09 -04:00
RaviSantosh Gudimetla
aa5e8770f5 Remove production usage warning
Removing production usage warning to encourage more users to try using descheduler and since descheduler has been stable so far.
2018-08-23 15:44:17 -04:00
RaviSantosh Gudimetla
2690d139c5 Merge pull request #110 from kubernetes-incubator/ravisantoshgudimetla-patch-1
Update the compatibility matrix
2018-08-22 16:31:09 -04:00
RaviSantosh Gudimetla
cd192ce5fc Update the compatibility matrix
Descheduler 0.4+ should work with kube 1.9+.
2018-08-22 16:09:18 -04:00
RaviSantosh Gudimetla
048f3fd1e5 Merge pull request #109 from ravisantoshgudimetla/test-cases-cleanup
Remove the unnecessary print statements in test file
2018-08-22 12:20:27 -04:00
ravisantoshgudimetla
a079fd2757 Remove the unnecessary print statements 2018-08-22 11:27:40 -04:00
RaviSantosh Gudimetla
ae0a9ed525 Merge pull request #108 from ravisantoshgudimetla/fix-warnings-ci
Fix deprecated warning in CI
2018-08-21 15:10:40 -04:00
ravisantoshgudimetla
0a815e8786 Fix deprecated warning in CI 2018-08-21 14:45:14 -04:00
RaviSantosh Gudimetla
0115748fe8 Merge pull request #105 from ravisantoshgudimetla/priority-low-node
Low node utilization to respect priority while evicting pods
2018-08-21 14:33:50 -04:00
ravisantoshgudimetla
d0305dac3f Low node utilization to respect priority while evicting pods 2018-08-21 14:14:26 -04:00
RaviSantosh Gudimetla
72d6a8aa33 Merge pull request #106 from ravisantoshgudimetla/fix-e2e
Fix broken e2e tests
2018-08-06 10:53:13 -04:00
ravisantoshgudimetla
654fdbba94 Fix broken e2e tests 2018-08-05 13:16:47 -04:00
22 changed files with 695 additions and 259 deletions

CONTRIBUTING.md (new file, 23 lines added)

@@ -0,0 +1,23 @@
# Contributing Guidelines
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
## Getting Started
We have full documentation on how to get started contributing here:
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
## Mentorship
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
## Contact Information
- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)


@@ -138,11 +138,7 @@ spec:
         volumeMounts:
         - mountPath: /policy-dir
           name: policy-volume
-        command:
-        - "/bin/sh"
-        - "-ec"
-        - |
-          /bin/descheduler --policy-config-file /policy-dir/policy.yaml
+        command: ["/bin/descheduler", "--policy-config-file", "/policy-dir/policy.yaml"]
       restartPolicy: "Never"
       serviceAccountName: descheduler-sa
       volumes:
@@ -285,11 +281,18 @@ This roadmap is not in any particular order.
 Descheduler | supported Kubernetes version
 -------------|-----------------------------
-0.4 | 1.9+
+0.4+ | 1.9+
 0.1-0.3 | 1.7-1.8
 
-## Note
-This project is under active development, and is not intended for production use.
-Any api could be changed any time with out any notice. That said, your feedback is
-very important and appreciated to make this project more stable and useful.
+## Community, discussion, contribution, and support
+Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
+
+You can reach the maintainers of this project at:
+- [Slack channel](https://kubernetes.slack.com/messages/sig-scheduling)
+- [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling)
+
+### Code of conduct
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).


@@ -57,4 +57,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
 	// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
 	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
+	// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
+	fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
 }
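For reference, a minimal standalone sketch of how such a boolean option is registered and parsed with github.com/spf13/pflag (the flag-set type used in the hunk above); the main package and the command line here are made up, only the flag name and help text come from the change itself:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Mirror of the new option: defaults to false, so local-storage pods stay protected.
	evictLocalStoragePods := false
	fs := pflag.NewFlagSet("descheduler", pflag.ExitOnError)
	fs.BoolVar(&evictLocalStoragePods, "evict-local-storage-pods", evictLocalStoragePods,
		"Enables evicting pods using local storage by descheduler")

	// Hypothetical invocation; without this argument the value stays false.
	if err := fs.Parse([]string{"--evict-local-storage-pods=true"}); err != nil {
		panic(err)
	}
	fmt.Println("evict local storage pods:", evictLocalStoragePods)
}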


@@ -10,7 +10,7 @@ master_uuid=$(uuid)
 node1_uuid=$(uuid)
 node2_uuid=$(uuid)
 kube_apiserver_port=6443
-kube_version=1.9.4
+kube_version=1.13.1
 
 DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/../../
 E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
@@ -36,10 +36,10 @@ create_cluster() {
 generate_kubeadm_instance_files() {
     # TODO: Check if they have come up. awk $6 contains the state(RUNNING or not).
-    master_public_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $5}')
+    master_private_ip=$(gcloud compute instances list | grep $master_uuid|awk '{print $4}')
     node1_public_ip=$(gcloud compute instances list | grep $node1_uuid|awk '{print $5}')
     node2_public_ip=$(gcloud compute instances list | grep $node2_uuid|awk '{print $5}')
-    echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_public_ip}" --skip-preflight-checks --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
+    echo "kubeadm init --kubernetes-version=${kube_version} --apiserver-advertise-address=${master_private_ip}" --ignore-preflight-errors=all --pod-network-cidr=10.96.0.0/12 > $E2E_GCE_HOME/kubeadm_install.sh
 }

kubernetes/configmap.yaml (new file, 27 lines added)

@@ -0,0 +1,27 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: descheduler-policy-configmap
  namespace: kube-system
data:
  policy.yaml: |
    apiVersion: "descheduler/v1alpha1"
    kind: "DeschedulerPolicy"
    strategies:
      "RemoveDuplicates":
        enabled: true
      "RemovePodsViolatingInterPodAntiAffinity":
        enabled: true
      "LowNodeUtilization":
        enabled: true
        params:
          nodeResourceUtilizationThresholds:
            thresholds:
              "cpu" : 20
              "memory": 20
              "pods": 20
            targetThresholds:
              "cpu" : 50
              "memory": 50
              "pods": 50

kubernetes/job.yaml (new file, 33 lines added)

@@ -0,0 +1,33 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job
  namespace: kube-system
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: descheduler-pod
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
    spec:
      containers:
      - name: descheduler
        image: descheduler:latest
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
        command:
        - "/bin/descheduler"
        args:
        - "--policy-config-file"
        - "/policy-dir/policy.yaml"
        - "--v"
        - "3"
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap

kubernetes/rbac.yaml (new file, 37 lines added)

@@ -0,0 +1,37 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-cluster-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: descheduler-sa
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: descehduler-cluster-role-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: descheduler-cluster-role
subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system


@@ -77,16 +77,16 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} else { } else {
yysep2 := !z.EncBinary() yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [8]bool var yyq2 [9]bool
_ = yyq2 _ = yyq2
_, _ = yysep2, yy2arr2 _, _ = yysep2, yy2arr2
const yyr2 bool = false const yyr2 bool = false
yyq2[0] = x.Kind != "" yyq2[0] = x.Kind != ""
yyq2[1] = x.APIVersion != "" yyq2[1] = x.APIVersion != ""
if yyr2 || yy2arr2 { if yyr2 || yy2arr2 {
r.WriteArrayStart(8) r.WriteArrayStart(9)
} else { } else {
var yynn2 = 6 var yynn2 = 7
for _, b := range yyq2 { for _, b := range yyq2 {
if b { if b {
yynn2++ yynn2++
@@ -263,6 +263,25 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode)) r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
} }
} }
if yyr2 || yy2arr2 {
r.WriteArrayElem()
yym28 := z.EncBinary()
_ = yym28
if false {
} else {
r.EncodeBool(bool(x.EvictLocalStoragePods))
}
} else {
r.WriteMapElemKey()
r.EncStructFieldKey(codecSelferValueTypeString1234, `EvictLocalStoragePods`)
r.WriteMapElemValue()
yym29 := z.EncBinary()
_ = yym29
if false {
} else {
r.EncodeBool(bool(x.EvictLocalStoragePods))
}
}
if yyr2 || yy2arr2 { if yyr2 || yy2arr2 {
r.WriteArrayEnd() r.WriteArrayEnd()
} else { } else {
@@ -420,6 +439,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.De
*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234)) *((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
} }
} }
case "EvictLocalStoragePods":
if r.TryDecodeAsNil() {
x.EvictLocalStoragePods = false
} else {
yyv20 := &x.EvictLocalStoragePods
yym21 := z.DecBinary()
_ = yym21
if false {
} else {
*((*bool)(yyv20)) = r.DecodeBool()
}
}
default: default:
z.DecStructFieldNotFound(-1, yys3) z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3 } // end switch yys3
@@ -431,16 +462,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
var h codecSelfer1234 var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d) z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r _, _, _ = h, z, r
var yyj20 int var yyj22 int
var yyb20 bool var yyb22 bool
var yyhl20 bool = l >= 0 var yyhl22 bool = l >= 0
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -448,29 +479,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.Kind = "" x.Kind = ""
} else { } else {
yyv21 := &x.Kind yyv23 := &x.Kind
yym22 := z.DecBinary()
_ = yym22
if false {
} else {
*((*string)(yyv21)) = r.DecodeString()
}
}
yyj20++
if yyhl20 {
yyb20 = yyj20 > l
} else {
yyb20 = r.CheckBreak()
}
if yyb20 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv23 := &x.APIVersion
yym24 := z.DecBinary() yym24 := z.DecBinary()
_ = yym24 _ = yym24
if false { if false {
@@ -478,13 +487,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv23)) = r.DecodeString() *((*string)(yyv23)) = r.DecodeString()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv25 := &x.APIVersion
yym26 := z.DecBinary()
_ = yym26
if false {
} else {
*((*string)(yyv25)) = r.DecodeString()
}
}
yyj22++
if yyhl22 {
yyb22 = yyj22 > l
} else {
yyb22 = r.CheckBreak()
}
if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -492,23 +523,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.DeschedulingInterval = 0 x.DeschedulingInterval = 0
} else { } else {
yyv25 := &x.DeschedulingInterval yyv27 := &x.DeschedulingInterval
yym26 := z.DecBinary() yym28 := z.DecBinary()
_ = yym26 _ = yym28
if false { if false {
} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil { } else if yyxt28 := z.Extension(z.I2Rtid(yyv27)); yyxt28 != nil {
z.DecExtension(yyv25, yyxt26) z.DecExtension(yyv27, yyxt28)
} else { } else {
*((*int64)(yyv25)) = int64(r.DecodeInt(64)) *((*int64)(yyv27)) = int64(r.DecodeInt(64))
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -516,29 +547,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.KubeconfigFile = "" x.KubeconfigFile = ""
} else { } else {
yyv27 := &x.KubeconfigFile yyv29 := &x.KubeconfigFile
yym28 := z.DecBinary()
_ = yym28
if false {
} else {
*((*string)(yyv27)) = r.DecodeString()
}
}
yyj20++
if yyhl20 {
yyb20 = yyj20 > l
} else {
yyb20 = r.CheckBreak()
}
if yyb20 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv29 := &x.PolicyConfigFile
yym30 := z.DecBinary() yym30 := z.DecBinary()
_ = yym30 _ = yym30
if false { if false {
@@ -546,13 +555,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv29)) = r.DecodeString() *((*string)(yyv29)) = r.DecodeString()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv31 := &x.PolicyConfigFile
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
*((*string)(yyv31)) = r.DecodeString()
}
}
yyj22++
if yyhl22 {
yyb22 = yyj22 > l
} else {
yyb22 = r.CheckBreak()
}
if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -560,21 +591,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.DryRun = false x.DryRun = false
} else { } else {
yyv31 := &x.DryRun yyv33 := &x.DryRun
yym32 := z.DecBinary() yym34 := z.DecBinary()
_ = yym32 _ = yym34
if false { if false {
} else { } else {
*((*bool)(yyv31)) = r.DecodeBool() *((*bool)(yyv33)) = r.DecodeBool()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -582,21 +613,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.NodeSelector = "" x.NodeSelector = ""
} else { } else {
yyv33 := &x.NodeSelector yyv35 := &x.NodeSelector
yym34 := z.DecBinary() yym36 := z.DecBinary()
_ = yym34 _ = yym36
if false { if false {
} else { } else {
*((*string)(yyv33)) = r.DecodeString() *((*string)(yyv35)) = r.DecodeString()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -604,26 +635,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.MaxNoOfPodsToEvictPerNode = 0 x.MaxNoOfPodsToEvictPerNode = 0
} else { } else {
yyv35 := &x.MaxNoOfPodsToEvictPerNode yyv37 := &x.MaxNoOfPodsToEvictPerNode
yym36 := z.DecBinary() yym38 := z.DecBinary()
_ = yym36 _ = yym38
if false { if false {
} else { } else {
*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234)) *((*int)(yyv37)) = int(r.DecodeInt(codecSelferBitsize1234))
}
}
yyj22++
if yyhl22 {
yyb22 = yyj22 > l
} else {
yyb22 = r.CheckBreak()
}
if yyb22 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.EvictLocalStoragePods = false
} else {
yyv39 := &x.EvictLocalStoragePods
yym40 := z.DecBinary()
_ = yym40
if false {
} else {
*((*bool)(yyv39)) = r.DecodeBool()
} }
} }
for { for {
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
break break
} }
r.ReadArrayElem() r.ReadArrayElem()
z.DecStructFieldNotFound(yyj20-1, "") z.DecStructFieldNotFound(yyj22-1, "")
} }
r.ReadArrayEnd() r.ReadArrayEnd()
} }


@@ -45,4 +45,7 @@ type DeschedulerConfiguration struct {
 	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
 	MaxNoOfPodsToEvictPerNode int
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods bool
 }


@@ -77,7 +77,7 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} else { } else {
yysep2 := !z.EncBinary() yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [8]bool var yyq2 [9]bool
_ = yyq2 _ = yyq2
_, _ = yysep2, yy2arr2 _, _ = yysep2, yy2arr2
const yyr2 bool = false const yyr2 bool = false
@@ -88,8 +88,9 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
yyq2[5] = x.DryRun != false yyq2[5] = x.DryRun != false
yyq2[6] = x.NodeSelector != "" yyq2[6] = x.NodeSelector != ""
yyq2[7] = x.MaxNoOfPodsToEvictPerNode != 0 yyq2[7] = x.MaxNoOfPodsToEvictPerNode != 0
yyq2[8] = x.EvictLocalStoragePods != false
if yyr2 || yy2arr2 { if yyr2 || yy2arr2 {
r.WriteArrayStart(8) r.WriteArrayStart(9)
} else { } else {
var yynn2 = 1 var yynn2 = 1
for _, b := range yyq2 { for _, b := range yyq2 {
@@ -298,6 +299,31 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
} }
} }
} }
if yyr2 || yy2arr2 {
r.WriteArrayElem()
if yyq2[8] {
yym28 := z.EncBinary()
_ = yym28
if false {
} else {
r.EncodeBool(bool(x.EvictLocalStoragePods))
}
} else {
r.EncodeBool(false)
}
} else {
if yyq2[8] {
r.WriteMapElemKey()
r.EncStructFieldKey(codecSelferValueTypeString1234, `evictLocalStoragePods`)
r.WriteMapElemValue()
yym29 := z.EncBinary()
_ = yym29
if false {
} else {
r.EncodeBool(bool(x.EvictLocalStoragePods))
}
}
}
if yyr2 || yy2arr2 { if yyr2 || yy2arr2 {
r.WriteArrayEnd() r.WriteArrayEnd()
} else { } else {
@@ -455,6 +481,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.De
*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234)) *((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
} }
} }
case "evictLocalStoragePods":
if r.TryDecodeAsNil() {
x.EvictLocalStoragePods = false
} else {
yyv20 := &x.EvictLocalStoragePods
yym21 := z.DecBinary()
_ = yym21
if false {
} else {
*((*bool)(yyv20)) = r.DecodeBool()
}
}
default: default:
z.DecStructFieldNotFound(-1, yys3) z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3 } // end switch yys3
@@ -466,16 +504,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
var h codecSelfer1234 var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d) z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r _, _, _ = h, z, r
var yyj20 int var yyj22 int
var yyb20 bool var yyb22 bool
var yyhl20 bool = l >= 0 var yyhl22 bool = l >= 0
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -483,29 +521,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.Kind = "" x.Kind = ""
} else { } else {
yyv21 := &x.Kind yyv23 := &x.Kind
yym22 := z.DecBinary()
_ = yym22
if false {
} else {
*((*string)(yyv21)) = r.DecodeString()
}
}
yyj20++
if yyhl20 {
yyb20 = yyj20 > l
} else {
yyb20 = r.CheckBreak()
}
if yyb20 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv23 := &x.APIVersion
yym24 := z.DecBinary() yym24 := z.DecBinary()
_ = yym24 _ = yym24
if false { if false {
@@ -513,13 +529,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv23)) = r.DecodeString() *((*string)(yyv23)) = r.DecodeString()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.APIVersion = ""
} else {
yyv25 := &x.APIVersion
yym26 := z.DecBinary()
_ = yym26
if false {
} else {
*((*string)(yyv25)) = r.DecodeString()
}
}
yyj22++
if yyhl22 {
yyb22 = yyj22 > l
} else {
yyb22 = r.CheckBreak()
}
if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -527,23 +565,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.DeschedulingInterval = 0 x.DeschedulingInterval = 0
} else { } else {
yyv25 := &x.DeschedulingInterval yyv27 := &x.DeschedulingInterval
yym26 := z.DecBinary() yym28 := z.DecBinary()
_ = yym26 _ = yym28
if false { if false {
} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil { } else if yyxt28 := z.Extension(z.I2Rtid(yyv27)); yyxt28 != nil {
z.DecExtension(yyv25, yyxt26) z.DecExtension(yyv27, yyxt28)
} else { } else {
*((*int64)(yyv25)) = int64(r.DecodeInt(64)) *((*int64)(yyv27)) = int64(r.DecodeInt(64))
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -551,29 +589,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.KubeconfigFile = "" x.KubeconfigFile = ""
} else { } else {
yyv27 := &x.KubeconfigFile yyv29 := &x.KubeconfigFile
yym28 := z.DecBinary()
_ = yym28
if false {
} else {
*((*string)(yyv27)) = r.DecodeString()
}
}
yyj20++
if yyhl20 {
yyb20 = yyj20 > l
} else {
yyb20 = r.CheckBreak()
}
if yyb20 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv29 := &x.PolicyConfigFile
yym30 := z.DecBinary() yym30 := z.DecBinary()
_ = yym30 _ = yym30
if false { if false {
@@ -581,13 +597,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
*((*string)(yyv29)) = r.DecodeString() *((*string)(yyv29)) = r.DecodeString()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.PolicyConfigFile = ""
} else {
yyv31 := &x.PolicyConfigFile
yym32 := z.DecBinary()
_ = yym32
if false {
} else {
*((*string)(yyv31)) = r.DecodeString()
}
}
yyj22++
if yyhl22 {
yyb22 = yyj22 > l
} else {
yyb22 = r.CheckBreak()
}
if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -595,21 +633,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.DryRun = false x.DryRun = false
} else { } else {
yyv31 := &x.DryRun yyv33 := &x.DryRun
yym32 := z.DecBinary() yym34 := z.DecBinary()
_ = yym32 _ = yym34
if false { if false {
} else { } else {
*((*bool)(yyv31)) = r.DecodeBool() *((*bool)(yyv33)) = r.DecodeBool()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -617,21 +655,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.NodeSelector = "" x.NodeSelector = ""
} else { } else {
yyv33 := &x.NodeSelector yyv35 := &x.NodeSelector
yym34 := z.DecBinary() yym36 := z.DecBinary()
_ = yym34 _ = yym36
if false { if false {
} else { } else {
*((*string)(yyv33)) = r.DecodeString() *((*string)(yyv35)) = r.DecodeString()
} }
} }
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
r.ReadArrayEnd() r.ReadArrayEnd()
return return
} }
@@ -639,26 +677,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.
if r.TryDecodeAsNil() { if r.TryDecodeAsNil() {
x.MaxNoOfPodsToEvictPerNode = 0 x.MaxNoOfPodsToEvictPerNode = 0
} else { } else {
yyv35 := &x.MaxNoOfPodsToEvictPerNode yyv37 := &x.MaxNoOfPodsToEvictPerNode
yym36 := z.DecBinary() yym38 := z.DecBinary()
_ = yym36 _ = yym38
if false { if false {
} else { } else {
*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234)) *((*int)(yyv37)) = int(r.DecodeInt(codecSelferBitsize1234))
}
}
yyj22++
if yyhl22 {
yyb22 = yyj22 > l
} else {
yyb22 = r.CheckBreak()
}
if yyb22 {
r.ReadArrayEnd()
return
}
r.ReadArrayElem()
if r.TryDecodeAsNil() {
x.EvictLocalStoragePods = false
} else {
yyv39 := &x.EvictLocalStoragePods
yym40 := z.DecBinary()
_ = yym40
if false {
} else {
*((*bool)(yyv39)) = r.DecodeBool()
} }
} }
for { for {
yyj20++ yyj22++
if yyhl20 { if yyhl22 {
yyb20 = yyj20 > l yyb22 = yyj22 > l
} else { } else {
yyb20 = r.CheckBreak() yyb22 = r.CheckBreak()
} }
if yyb20 { if yyb22 {
break break
} }
r.ReadArrayElem() r.ReadArrayElem()
z.DecStructFieldNotFound(yyj20-1, "") z.DecStructFieldNotFound(yyj22-1, "")
} }
r.ReadArrayEnd() r.ReadArrayEnd()
} }


@@ -45,4 +45,7 @@ type DeschedulerConfiguration struct {
 	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
 	MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
+
+	// EvictLocalStoragePods allows pods using local storage to be evicted.
+	EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
 }


@@ -48,6 +48,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
 	out.DryRun = in.DryRun
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
+	out.EvictLocalStoragePods = in.EvictLocalStoragePods
 	return nil
 }
@@ -63,6 +64,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
 	out.DryRun = in.DryRun
 	out.NodeSelector = in.NodeSelector
 	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
+	out.EvictLocalStoragePods = in.EvictLocalStoragePods
 	return nil
 }


@@ -52,23 +52,23 @@ func IsLatencySensitivePod(pod *v1.Pod) bool {
 }
 
 // IsEvictable checks if a pod is evictable or not.
-func IsEvictable(pod *v1.Pod) bool {
+func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
 	ownerRefList := OwnerRef(pod)
-	if IsMirrorPod(pod) || IsPodWithLocalStorage(pod) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod) {
+	if IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod) {
 		return false
 	}
 	return true
 }
 
 // ListEvictablePodsOnNode returns the list of evictable pods on node.
-func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
 	pods, err := ListPodsOnANode(client, node)
 	if err != nil {
 		return []*v1.Pod{}, err
 	}
 	evictablePods := make([]*v1.Pod, 0)
 	for _, pod := range pods {
-		if !IsEvictable(pod) {
+		if !IsEvictable(pod, evictLocalStoragePods) {
 			continue
 		} else {
 			evictablePods = append(evictablePods, pod)
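The effect of the new parameter can be seen in a small standalone sketch; hasLocalStorage and evictable below are illustrative stand-ins for the package's own helpers (only the gating expression mirrors the hunk above), and k8s.io/api/core/v1 is the only real dependency:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hasLocalStorage is an illustrative stand-in for a local-storage check:
// any emptyDir or hostPath volume counts as local storage here.
func hasLocalStorage(pod *v1.Pod) bool {
	for _, vol := range pod.Spec.Volumes {
		if vol.EmptyDir != nil || vol.HostPath != nil {
			return true
		}
	}
	return false
}

// evictable reproduces only the local-storage part of the gate:
// such a pod is skipped unless evictLocalStoragePods is true.
func evictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
	return evictLocalStoragePods || !hasLocalStorage(pod)
}

func main() {
	pod := &v1.Pod{}
	pod.Spec.Volumes = []v1.Volume{
		{Name: "scratch", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
	}
	fmt.Println(evictable(pod, false)) // false: local-storage pods are protected by default
	fmt.Println(evictable(pod, true))  // true: the new option lifts that protection
}

Mirror pods, DaemonSet pods, critical pods, and pods without owner references remain non-evictable regardless of the new flag, per the condition in the hunk.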


@@ -40,15 +40,15 @@ func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.Descheduler
 	if !strategy.Enabled {
 		return
 	}
-	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
+	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
 }
 
 // deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
-func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
+func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
 	podsEvicted := 0
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v", node.Name)
-		dpm := ListDuplicatePodsOnANode(client, node)
+		dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
 		for creator, pods := range dpm {
 			if len(pods) > 1 {
 				glog.V(1).Infof("%#v", creator)
@@ -73,8 +73,8 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
 }
 
 // ListDuplicatePodsOnANode lists duplicate pods on a given node.
-func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
-	pods, err := podutil.ListEvictablePodsOnNode(client, node)
+func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
+	pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
 	if err != nil {
 		return nil
 	}
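For orientation, a rough self-contained sketch of the duplicate-grouping idea behind ListDuplicatePodsOnANode: evictable pods on one node are keyed by their controlling owner, and any key with more than one pod is a duplicate group. The duplicateKey helper and its key format are made-up simplifications, not the strategy's actual DuplicatePodsMap keying:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// duplicateKey builds an illustrative "creator" key from the first owner reference.
func duplicateKey(pod *v1.Pod) string {
	if len(pod.OwnerReferences) == 0 {
		return ""
	}
	owner := pod.OwnerReferences[0]
	return owner.Kind + "/" + owner.Name
}

func main() {
	mkPod := func(name, ownerKind, ownerName string) *v1.Pod {
		return &v1.Pod{ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			OwnerReferences: []metav1.OwnerReference{{Kind: ownerKind, Name: ownerName}},
		}}
	}
	pods := []*v1.Pod{
		mkPod("web-1", "ReplicaSet", "web"),
		mkPod("web-2", "ReplicaSet", "web"),
		mkPod("db-1", "StatefulSet", "db"),
	}
	groups := map[string][]*v1.Pod{}
	for _, p := range pods {
		key := duplicateKey(p)
		groups[key] = append(groups[key], p)
	}
	for creator, ps := range groups {
		if len(ps) > 1 {
			fmt.Printf("%s has %d pods on this node; all but one are eviction candidates\n", creator, len(ps))
		}
	}
}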


@@ -77,7 +77,7 @@ func TestFindDuplicatePods(t *testing.T) {
 	})
 	npe := nodePodEvictedCount{}
 	npe[node] = 0
-	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
+	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2, false)
 	if podsEvicted != expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted")
 	}


@@ -41,6 +41,7 @@ type NodeUsageMap struct {
 	bPods []*v1.Pod
 	gPods []*v1.Pod
 }
 
 type NodePodsMap map[*v1.Node][]*v1.Pod
 
 func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
@@ -59,8 +60,8 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
 		return
 	}
 
-	npm := CreateNodePodsMap(ds.Client, nodes)
-	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+	npm := createNodePodsMap(ds.Client, nodes)
+	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
 
 	glog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
 		thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
@@ -130,10 +131,10 @@ func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
 // classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
 // low and high thresholds, it is simply ignored.
-func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
+func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool) ([]NodeUsageMap, []NodeUsageMap) {
 	lowNodes, targetNodes := []NodeUsageMap{}, []NodeUsageMap{}
 	for node, pods := range npm {
-		usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
+		usage, allPods, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods, evictLocalStoragePods)
 		nuMap := NodeUsageMap{node, usage, allPods, nonRemovablePods, bePods, bPods, gPods}
 		// Check if node is underutilized and if we can schedule pods on it.
@@ -151,6 +152,9 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
 	return lowNodes, targetNodes
 }
 
+// evictPodsFromTargetNodes evicts pods based on priority, if all the pods on the node have priority, if not
+// evicts them based on QoS as fallback option.
+// TODO: @ravig Break this function into smaller functions.
 func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
 	podsEvicted := 0
@@ -191,12 +195,27 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
 		glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
 		currentPodsEvicted := nodepodCount[node.node]
-		// evict best effort pods
-		evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
-		// evict burstable pods
-		evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
-		// evict guaranteed pods
-		evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+		// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
+		if node.allPods[0].Spec.Priority != nil {
+			glog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
+			evictablePods := make([]*v1.Pod, 0)
+			evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
+			// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
+			sortPodsBasedOnPriority(evictablePods)
+			evictPods(evictablePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+		} else {
+			// TODO: Remove this when we support only priority.
+			// Falling back to evicting pods based on priority.
+			glog.V(1).Infof("Evicting pods based on QoS")
+			glog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
+			// evict best effort pods
+			evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+			// evict burstable pods
+			evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+			// evict guaranteed pods
+			evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+		}
 		nodepodCount[node.node] = currentPodsEvicted
 		podsEvicted = podsEvicted + nodepodCount[node.node]
 		glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
@@ -269,7 +288,30 @@ func SortNodesByUsage(nodes []NodeUsageMap) {
 	})
 }
 
-func CreateNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
+// sortPodsBasedOnPriority sorts pods based on priority and if their priorities are equal, they are sorted based on QoS tiers.
+func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
+	sort.Slice(evictablePods, func(i, j int) bool {
+		if evictablePods[i].Spec.Priority == nil && evictablePods[j].Spec.Priority != nil {
+			return true
+		}
+		if evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority != nil {
+			return false
+		}
+		if (evictablePods[j].Spec.Priority == nil && evictablePods[i].Spec.Priority == nil) || (*evictablePods[i].Spec.Priority == *evictablePods[j].Spec.Priority) {
+			if podutil.IsBestEffortPod(evictablePods[i]) {
+				return true
+			}
+			if podutil.IsBurstablePod(evictablePods[i]) && podutil.IsGuaranteedPod(evictablePods[j]) {
+				return true
+			}
+			return false
+		}
+		return *evictablePods[i].Spec.Priority < *evictablePods[j].Spec.Priority
+	})
+}
+
+// createNodePodsMap returns nodepodsmap with evictable pods on node.
+func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
 	npm := NodePodsMap{}
 	for _, node := range nodes {
 		pods, err := podutil.ListPodsOnANode(client, node)
@@ -308,7 +350,8 @@ func IsNodeWithLowUtilization(nodeThresholds api.ResourceThresholds, thresholds
 	return true
 }
 
-func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
+// Nodeutilization returns the current usage of node.
+func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool) (api.ResourceThresholds, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod, []*v1.Pod) {
 	bePods := []*v1.Pod{}
 	nonRemovablePods := []*v1.Pod{}
 	bPods := []*v1.Pod{}
@@ -316,7 +359,7 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*
 	totalReqs := map[v1.ResourceName]resource.Quantity{}
 	for _, pod := range pods {
 		// We need to compute the usage of nonRemovablePods unless it is a best effort pod. So, cannot use podutil.ListEvictablePodsOnNode
-		if !podutil.IsEvictable(pod) {
+		if !podutil.IsEvictable(pod, evictLocalStoragePods) {
 			nonRemovablePods = append(nonRemovablePods, pod)
 			if podutil.IsBestEffortPod(pod) {
 				continue
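The ordering that the new sortPodsBasedOnPriority establishes can be illustrated with a toy, self-contained example; the pod struct below is a stand-in for *v1.Pod and the QoS tie-breaking from the real comparator is left out:

package main

import (
	"fmt"
	"sort"
)

func main() {
	low, high := int32(0), int32(10000)
	type pod struct {
		name     string
		priority *int32 // nil means no priority set on the pod spec
	}
	pods := []pod{{"p-high", &high}, {"p-none", nil}, {"p-low", &low}}

	// Pods without a priority sort to the front, then ascending priority.
	sort.Slice(pods, func(i, j int) bool {
		if pods[i].priority == nil {
			return pods[j].priority != nil
		}
		if pods[j].priority == nil {
			return false
		}
		return *pods[i].priority < *pods[j].priority
	})

	for _, p := range pods {
		fmt.Println(p.name) // p-none, p-low, p-high
	}
}

Because eviction walks the sorted slice front to back, lower-priority pods are tried first and the highest-priority pods are touched last.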


@@ -28,10 +28,11 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
+	"reflect"
 )
 
 // TODO: Make this table driven.
-func TestLowNodeUtilization(t *testing.T) {
+func TestLowNodeUtilizationWithoutPriority(t *testing.T) {
 	var thresholds = make(api.ResourceThresholds)
 	var targetThresholds = make(api.ResourceThresholds)
 	thresholds[v1.ResourceCPU] = 30
@@ -110,8 +111,8 @@ func TestLowNodeUtilization(t *testing.T) {
 		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
 	})
 	expectedPodsEvicted := 3
-	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
-	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
+	npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
+	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
 	if len(lowNodes) != 1 {
 		t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
 	}
@@ -126,6 +127,151 @@ func TestLowNodeUtilization(t *testing.T) {
 }
 
// TODO: Make this table driven.
func TestLowNodeUtilizationWithPriorities(t *testing.T) {
var thresholds = make(api.ResourceThresholds)
var targetThresholds = make(api.ResourceThresholds)
thresholds[v1.ResourceCPU] = 30
thresholds[v1.ResourcePods] = 30
targetThresholds[v1.ResourceCPU] = 50
targetThresholds[v1.ResourcePods] = 50
lowPriority := int32(0)
highPriority := int32(10000)
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
n2 := test.BuildTestNode("n2", 4000, 3000, 10)
n3 := test.BuildTestNode("n3", 4000, 3000, 10)
// Making n3 node unschedulable so that it won't counted in lowUtilized nodes list.
n3.Spec.Unschedulable = true
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p1.Spec.Priority = &highPriority
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p2.Spec.Priority = &highPriority
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p3.Spec.Priority = &highPriority
p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
p4.Spec.Priority = &highPriority
p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
p5.Spec.Priority = &lowPriority
// These won't be evicted.
p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
p6.Spec.Priority = &highPriority
p7 := test.BuildTestPod("p7", 400, 0, n1.Name)
p7.Spec.Priority = &lowPriority
p8 := test.BuildTestPod("p8", 400, 0, n1.Name)
p8.Spec.Priority = &lowPriority
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
// The following 4 pods won't get evicted.
// A daemonset.
p6.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p7.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
p8.Namespace = "kube-system"
p8.Annotations = test.GetCriticalPodAnnotation()
p9 := test.BuildTestPod("p9", 400, 0, n1.Name)
p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8}}, nil
}
if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: []v1.Pod{*p9}}, nil
}
if strings.Contains(fieldString, "n3") {
return true, &v1.PodList{Items: []v1.Pod{}}, nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
switch getAction.GetName() {
case n1.Name:
return true, n1, nil
case n2.Name:
return true, n2, nil
case n3.Name:
return true, n3, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
expectedPodsEvicted := 3
npm := createNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, false)
if len(lowNodes) != 1 {
t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
}
npe := nodePodEvictedCount{}
npe[n1] = 0
npe[n2] = 0
npe[n3] = 0
podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
}
}
func TestSortPodsByPriority(t *testing.T) {
n1 := test.BuildTestNode("n1", 4000, 3000, 9)
lowPriority := int32(0)
highPriority := int32(10000)
p1 := test.BuildTestPod("p1", 400, 0, n1.Name)
p1.Spec.Priority = &lowPriority
// BestEffort
p2 := test.BuildTestPod("p2", 400, 0, n1.Name)
p2.Spec.Priority = &highPriority
p2.Spec.Containers[0].Resources.Requests = nil
p2.Spec.Containers[0].Resources.Limits = nil
// Burstable
p3 := test.BuildTestPod("p3", 400, 0, n1.Name)
p3.Spec.Priority = &highPriority
// Guaranteed
p4 := test.BuildTestPod("p4", 400, 100, n1.Name)
p4.Spec.Priority = &highPriority
p4.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
p4.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
// Best effort with nil priorities.
p5 := test.BuildTestPod("p5", 400, 100, n1.Name)
p5.Spec.Priority = nil
p6 := test.BuildTestPod("p6", 400, 100, n1.Name)
p6.Spec.Containers[0].Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(400, resource.DecimalSI)
p6.Spec.Containers[0].Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(100, resource.DecimalSI)
p6.Spec.Priority = nil
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
sortPodsBasedOnPriority(podList)
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
}
}
 func TestValidateThresholds(t *testing.T) {
 	tests := []struct {
 		name string


@@ -27,10 +27,10 @@ import (
 )
 
 func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
-	removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
+	removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
 }
 
-func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
+func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
 	evictedPodCount := 0
 	if !strategy.Enabled {
 		return evictedPodCount
@@ -44,7 +44,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node)
+		pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
 		if err != nil {
 			glog.Errorf("failed to get pods from %v: %v", node.Name, err)
 		}


@@ -176,7 +176,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				Client: fakeClient,
 			}
-			actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
+			actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict, false)
 			if actualEvictedPodCount != tc.expectedEvictedPodCount {
 				t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
 			}


@@ -35,15 +35,15 @@ func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, stra
 	if !strategy.Enabled {
 		return
 	}
-	removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
+	removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode, ds.EvictLocalStoragePods)
 }
 
 // removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
-func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
+func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
 	podsEvicted := 0
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(client, node)
+		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
 		if err != nil {
 			return 0
 		}


@@ -55,13 +55,13 @@ func TestPodAntiAffinity(t *testing.T) {
 	npe := nodePodEvictedCount{}
 	npe[node] = 0
 	expectedEvictedPodCount := 3
-	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
+	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0, false)
 	if podsEvicted != expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
 	}
 	npe[node] = 0
 	expectedEvictedPodCount = 1
-	podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
+	podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1, false)
 	if podsEvicted != expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, expectedEvictedPodCount)
 	}


@@ -37,6 +37,7 @@ func BuildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod
 			{
 				Resources: v1.ResourceRequirements{
 					Requests: v1.ResourceList{},
+					Limits: v1.ResourceList{},
 				},
 			},
 		},