Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Merge pull request #84 from ingvagabund/max-pods-per-node

Max pods per node

Makefile (+6)
@@ -46,3 +46,9 @@ test-unit:
 
 test-e2e:
 	./test/run-e2e-tests.sh
+
+gen:
+	./hack/update-codecgen.sh
+	./hack/update-generated-conversions.sh
+	./hack/update-generated-deep-copies.sh
+	./hack/update-generated-defaulters.sh
@@ -55,4 +55,6 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
 	// node-selector query causes descheduler to run only on nodes that match the node labels in the query
 	fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2)")
+	// max-pods-to-evict-per-node limits the maximum number of pods evicted per node by the descheduler.
+	fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
 }
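
Note: for readers unfamiliar with the flag plumbing, here is a minimal standalone sketch of registering and reading such a limit with spf13/pflag. The binary name, the option struct, and the parse arguments are simplified stand-ins for illustration, not code from this PR:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// serverOptions is a simplified stand-in for DeschedulerServer.
type serverOptions struct {
	MaxNoOfPodsToEvictPerNode int
}

func main() {
	opts := &serverOptions{}
	fs := pflag.NewFlagSet("descheduler", pflag.ExitOnError)
	// The default is the struct's zero value (0); with the guards added in this
	// PR, a limit of 0 means no pods are evicted at all.
	fs.IntVar(&opts.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", opts.MaxNoOfPodsToEvictPerNode,
		"Limits the maximum number of pods to be evicted per node by descheduler")
	fs.Parse([]string{"--max-pods-to-evict-per-node=5"})
	fmt.Println(opts.MaxNoOfPodsToEvictPerNode) // prints 5
}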
@@ -77,16 +77,16 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [7]bool
+		var yyq2 [8]bool
 		_ = yyq2
 		_, _ = yysep2, yy2arr2
 		const yyr2 bool = false
 		yyq2[0] = x.Kind != ""
 		yyq2[1] = x.APIVersion != ""
 		if yyr2 || yy2arr2 {
-			r.WriteArrayStart(7)
+			r.WriteArrayStart(8)
 		} else {
-			var yynn2 = 5
+			var yynn2 = 6
 			for _, b := range yyq2 {
 				if b {
 					yynn2++

@@ -244,6 +244,25 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 				r.EncodeString(codecSelferCcUTF81234, string(x.NodeSelector))
 			}
 		}
+		if yyr2 || yy2arr2 {
+			r.WriteArrayElem()
+			yym25 := z.EncBinary()
+			_ = yym25
+			if false {
+			} else {
+				r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
+			}
+		} else {
+			r.WriteMapElemKey()
+			r.EncStructFieldKey(codecSelferValueTypeString1234, `MaxNoOfPodsToEvictPerNode`)
+			r.WriteMapElemValue()
+			yym26 := z.EncBinary()
+			_ = yym26
+			if false {
+			} else {
+				r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
+			}
+		}
 		if yyr2 || yy2arr2 {
 			r.WriteArrayEnd()
 		} else {

@@ -389,6 +408,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 				*((*string)(yyv16)) = r.DecodeString()
 			}
 		}
+	case "MaxNoOfPodsToEvictPerNode":
+		if r.TryDecodeAsNil() {
+			x.MaxNoOfPodsToEvictPerNode = 0
+		} else {
+			yyv18 := &x.MaxNoOfPodsToEvictPerNode
+			yym19 := z.DecBinary()
+			_ = yym19
+			if false {
+			} else {
+				*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
+			}
+		}
 	default:
 		z.DecStructFieldNotFound(-1, yys3)
 	} // end switch yys3

@@ -400,16 +431,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj18 int
-	var yyb18 bool
-	var yyhl18 bool = l >= 0
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	var yyj20 int
+	var yyb20 bool
+	var yyhl20 bool = l >= 0
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}

@@ -417,29 +448,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.Kind = ""
 	} else {
-		yyv19 := &x.Kind
-		yym20 := z.DecBinary()
-		_ = yym20
-		if false {
-		} else {
-			*((*string)(yyv19)) = r.DecodeString()
-		}
-	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
-	} else {
-		yyb18 = r.CheckBreak()
-	}
-	if yyb18 {
-		r.ReadArrayEnd()
-		return
-	}
-	r.ReadArrayElem()
-	if r.TryDecodeAsNil() {
-		x.APIVersion = ""
-	} else {
-		yyv21 := &x.APIVersion
+		yyv21 := &x.Kind
 		yym22 := z.DecBinary()
 		_ = yym22
 		if false {

@@ -447,13 +456,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 			*((*string)(yyv21)) = r.DecodeString()
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.APIVersion = ""
+	} else {
+		yyv23 := &x.APIVersion
+		yym24 := z.DecBinary()
+		_ = yym24
+		if false {
+		} else {
+			*((*string)(yyv23)) = r.DecodeString()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}

@@ -461,23 +492,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.DeschedulingInterval = 0
 	} else {
-		yyv23 := &x.DeschedulingInterval
-		yym24 := z.DecBinary()
-		_ = yym24
+		yyv25 := &x.DeschedulingInterval
+		yym26 := z.DecBinary()
+		_ = yym26
 		if false {
-		} else if yyxt24 := z.Extension(z.I2Rtid(yyv23)); yyxt24 != nil {
-			z.DecExtension(yyv23, yyxt24)
+		} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
+			z.DecExtension(yyv25, yyxt26)
 		} else {
-			*((*int64)(yyv23)) = int64(r.DecodeInt(64))
+			*((*int64)(yyv25)) = int64(r.DecodeInt(64))
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}

@@ -485,29 +516,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.KubeconfigFile = ""
 	} else {
-		yyv25 := &x.KubeconfigFile
-		yym26 := z.DecBinary()
-		_ = yym26
-		if false {
-		} else {
-			*((*string)(yyv25)) = r.DecodeString()
-		}
-	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
-	} else {
-		yyb18 = r.CheckBreak()
-	}
-	if yyb18 {
-		r.ReadArrayEnd()
-		return
-	}
-	r.ReadArrayElem()
-	if r.TryDecodeAsNil() {
-		x.PolicyConfigFile = ""
-	} else {
-		yyv27 := &x.PolicyConfigFile
+		yyv27 := &x.KubeconfigFile
 		yym28 := z.DecBinary()
 		_ = yym28
 		if false {

@@ -515,13 +524,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 			*((*string)(yyv27)) = r.DecodeString()
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PolicyConfigFile = ""
+	} else {
+		yyv29 := &x.PolicyConfigFile
+		yym30 := z.DecBinary()
+		_ = yym30
+		if false {
+		} else {
+			*((*string)(yyv29)) = r.DecodeString()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}

@@ -529,21 +560,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.DryRun = false
 	} else {
-		yyv29 := &x.DryRun
-		yym30 := z.DecBinary()
-		_ = yym30
+		yyv31 := &x.DryRun
+		yym32 := z.DecBinary()
+		_ = yym32
 		if false {
 		} else {
-			*((*bool)(yyv29)) = r.DecodeBool()
+			*((*bool)(yyv31)) = r.DecodeBool()
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}

@@ -551,26 +582,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.NodeSelector = ""
 	} else {
-		yyv31 := &x.NodeSelector
-		yym32 := z.DecBinary()
-		_ = yym32
+		yyv33 := &x.NodeSelector
+		yym34 := z.DecBinary()
+		_ = yym34
 		if false {
 		} else {
-			*((*string)(yyv31)) = r.DecodeString()
+			*((*string)(yyv33)) = r.DecodeString()
 		}
 	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.MaxNoOfPodsToEvictPerNode = 0
+	} else {
+		yyv35 := &x.MaxNoOfPodsToEvictPerNode
+		yym36 := z.DecBinary()
+		_ = yym36
+		if false {
+		} else {
+			*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
+		}
+	}
 	for {
-		yyj18++
-		if yyhl18 {
-			yyb18 = yyj18 > l
+		yyj20++
+		if yyhl20 {
+			yyb20 = yyj20 > l
 		} else {
-			yyb18 = r.CheckBreak()
+			yyb20 = r.CheckBreak()
 		}
-		if yyb18 {
+		if yyb20 {
 			break
 		}
 		r.ReadArrayElem()
-		z.DecStructFieldNotFound(yyj18-1, "")
+		z.DecStructFieldNotFound(yyj20-1, "")
 	}
 	r.ReadArrayEnd()
 }
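
Note: the large hunks above are regenerated ugorji codec code (see the new `gen` Makefile target); adding an eighth struct field mechanically bumps the array length (7 to 8), the non-omitted field count (5 to 6), and every numbered helper variable after the insertion point. Below is a hand-written sketch of the array-vs-map encoding choice that the generated code implements, using the github.com/ugorji/go/codec API directly; illustrative only, not from this PR:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type config struct {
	NodeSelector              string
	MaxNoOfPodsToEvictPerNode int
}

func main() {
	c := config{NodeSelector: "role=worker", MaxNoOfPodsToEvictPerNode: 5}

	// Default: the struct is encoded as a map keyed by field name,
	// e.g. {"NodeSelector":"role=worker","MaxNoOfPodsToEvictPerNode":5}.
	var asMap []byte
	mh := new(codec.JsonHandle)
	codec.NewEncoderBytes(&asMap, mh).MustEncode(c)
	fmt.Println(string(asMap))

	// StructToArray: fields are encoded positionally, e.g. ["role=worker",5],
	// which is why the generated encoder has to grow WriteArrayStart(7) to
	// WriteArrayStart(8) when a field is added.
	var asArray []byte
	ah := new(codec.JsonHandle)
	ah.StructToArray = true
	codec.NewEncoderBytes(&asArray, ah).MustEncode(c)
	fmt.Println(string(asArray))
}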
@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {
 
 	// Node selectors
 	NodeSelector string
+
+	// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode int
 }
@@ -77,7 +77,7 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 	} else {
 		yysep2 := !z.EncBinary()
 		yy2arr2 := z.EncBasicHandle().StructToArray
-		var yyq2 [7]bool
+		var yyq2 [8]bool
 		_ = yyq2
 		_, _ = yysep2, yy2arr2
 		const yyr2 bool = false

@@ -87,8 +87,9 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 		yyq2[4] = x.PolicyConfigFile != ""
 		yyq2[5] = x.DryRun != false
 		yyq2[6] = x.NodeSelector != ""
+		yyq2[7] = x.MaxNoOfPodsToEvictPerNode != 0
 		if yyr2 || yy2arr2 {
-			r.WriteArrayStart(7)
+			r.WriteArrayStart(8)
 		} else {
 			var yynn2 = 1
 			for _, b := range yyq2 {

@@ -272,6 +273,31 @@ func (x *DeschedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) {
 				}
 			}
 		}
+		if yyr2 || yy2arr2 {
+			r.WriteArrayElem()
+			if yyq2[7] {
+				yym25 := z.EncBinary()
+				_ = yym25
+				if false {
+				} else {
+					r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
+				}
+			} else {
+				r.EncodeInt(0)
+			}
+		} else {
+			if yyq2[7] {
+				r.WriteMapElemKey()
+				r.EncStructFieldKey(codecSelferValueTypeString1234, `maxNoOfPodsToEvictPerNode`)
+				r.WriteMapElemValue()
+				yym26 := z.EncBinary()
+				_ = yym26
+				if false {
+				} else {
+					r.EncodeInt(int64(x.MaxNoOfPodsToEvictPerNode))
+				}
+			}
+		}
 		if yyr2 || yy2arr2 {
 			r.WriteArrayEnd()
 		} else {

@@ -417,6 +443,18 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
 				*((*string)(yyv16)) = r.DecodeString()
 			}
 		}
+	case "maxNoOfPodsToEvictPerNode":
+		if r.TryDecodeAsNil() {
+			x.MaxNoOfPodsToEvictPerNode = 0
+		} else {
+			yyv18 := &x.MaxNoOfPodsToEvictPerNode
+			yym19 := z.DecBinary()
+			_ = yym19
+			if false {
+			} else {
+				*((*int)(yyv18)) = int(r.DecodeInt(codecSelferBitsize1234))
+			}
+		}
 	default:
 		z.DecStructFieldNotFound(-1, yys3)
 	} // end switch yys3

@@ -428,16 +466,16 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	var h codecSelfer1234
 	z, r := codec1978.GenHelperDecoder(d)
 	_, _, _ = h, z, r
-	var yyj18 int
-	var yyb18 bool
-	var yyhl18 bool = l >= 0
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	var yyj20 int
+	var yyb20 bool
+	var yyhl20 bool = l >= 0
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}

@@ -445,29 +483,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.Kind = ""
 	} else {
-		yyv19 := &x.Kind
-		yym20 := z.DecBinary()
-		_ = yym20
-		if false {
-		} else {
-			*((*string)(yyv19)) = r.DecodeString()
-		}
-	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
-	} else {
-		yyb18 = r.CheckBreak()
-	}
-	if yyb18 {
-		r.ReadArrayEnd()
-		return
-	}
-	r.ReadArrayElem()
-	if r.TryDecodeAsNil() {
-		x.APIVersion = ""
-	} else {
-		yyv21 := &x.APIVersion
+		yyv21 := &x.Kind
 		yym22 := z.DecBinary()
 		_ = yym22
 		if false {

@@ -475,13 +491,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 			*((*string)(yyv21)) = r.DecodeString()
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.APIVersion = ""
+	} else {
+		yyv23 := &x.APIVersion
+		yym24 := z.DecBinary()
+		_ = yym24
+		if false {
+		} else {
+			*((*string)(yyv23)) = r.DecodeString()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}

@@ -489,23 +527,23 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.DeschedulingInterval = 0
 	} else {
-		yyv23 := &x.DeschedulingInterval
-		yym24 := z.DecBinary()
-		_ = yym24
+		yyv25 := &x.DeschedulingInterval
+		yym26 := z.DecBinary()
+		_ = yym26
 		if false {
-		} else if yyxt24 := z.Extension(z.I2Rtid(yyv23)); yyxt24 != nil {
-			z.DecExtension(yyv23, yyxt24)
+		} else if yyxt26 := z.Extension(z.I2Rtid(yyv25)); yyxt26 != nil {
+			z.DecExtension(yyv25, yyxt26)
 		} else {
-			*((*int64)(yyv23)) = int64(r.DecodeInt(64))
+			*((*int64)(yyv25)) = int64(r.DecodeInt(64))
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}

@@ -513,29 +551,7 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.KubeconfigFile = ""
 	} else {
-		yyv25 := &x.KubeconfigFile
-		yym26 := z.DecBinary()
-		_ = yym26
-		if false {
-		} else {
-			*((*string)(yyv25)) = r.DecodeString()
-		}
-	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
-	} else {
-		yyb18 = r.CheckBreak()
-	}
-	if yyb18 {
-		r.ReadArrayEnd()
-		return
-	}
-	r.ReadArrayElem()
-	if r.TryDecodeAsNil() {
-		x.PolicyConfigFile = ""
-	} else {
-		yyv27 := &x.PolicyConfigFile
+		yyv27 := &x.KubeconfigFile
 		yym28 := z.DecBinary()
 		_ = yym28
 		if false {

@@ -543,13 +559,35 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 			*((*string)(yyv27)) = r.DecodeString()
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.PolicyConfigFile = ""
+	} else {
+		yyv29 := &x.PolicyConfigFile
+		yym30 := z.DecBinary()
+		_ = yym30
+		if false {
+		} else {
+			*((*string)(yyv29)) = r.DecodeString()
+		}
+	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}

@@ -557,21 +595,21 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.DryRun = false
 	} else {
-		yyv29 := &x.DryRun
-		yym30 := z.DecBinary()
-		_ = yym30
+		yyv31 := &x.DryRun
+		yym32 := z.DecBinary()
+		_ = yym32
 		if false {
 		} else {
-			*((*bool)(yyv29)) = r.DecodeBool()
+			*((*bool)(yyv31)) = r.DecodeBool()
 		}
 	}
-	yyj18++
-	if yyhl18 {
-		yyb18 = yyj18 > l
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
 	} else {
-		yyb18 = r.CheckBreak()
+		yyb20 = r.CheckBreak()
 	}
-	if yyb18 {
+	if yyb20 {
 		r.ReadArrayEnd()
 		return
 	}

@@ -579,26 +617,48 @@ func (x *DeschedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
 	if r.TryDecodeAsNil() {
 		x.NodeSelector = ""
 	} else {
-		yyv31 := &x.NodeSelector
-		yym32 := z.DecBinary()
-		_ = yym32
+		yyv33 := &x.NodeSelector
+		yym34 := z.DecBinary()
+		_ = yym34
 		if false {
 		} else {
-			*((*string)(yyv31)) = r.DecodeString()
+			*((*string)(yyv33)) = r.DecodeString()
 		}
 	}
+	yyj20++
+	if yyhl20 {
+		yyb20 = yyj20 > l
+	} else {
+		yyb20 = r.CheckBreak()
+	}
+	if yyb20 {
+		r.ReadArrayEnd()
+		return
+	}
+	r.ReadArrayElem()
+	if r.TryDecodeAsNil() {
+		x.MaxNoOfPodsToEvictPerNode = 0
+	} else {
+		yyv35 := &x.MaxNoOfPodsToEvictPerNode
+		yym36 := z.DecBinary()
+		_ = yym36
+		if false {
+		} else {
+			*((*int)(yyv35)) = int(r.DecodeInt(codecSelferBitsize1234))
+		}
+	}
 	for {
-		yyj18++
-		if yyhl18 {
-			yyb18 = yyj18 > l
+		yyj20++
+		if yyhl20 {
+			yyb20 = yyj20 > l
 		} else {
-			yyb18 = r.CheckBreak()
+			yyb20 = r.CheckBreak()
 		}
-		if yyb18 {
+		if yyb20 {
 			break
 		}
 		r.ReadArrayElem()
-		z.DecStructFieldNotFound(yyj18-1, "")
+		z.DecStructFieldNotFound(yyj20-1, "")
 	}
 	r.ReadArrayEnd()
 }
@@ -42,4 +42,7 @@ type DeschedulerConfiguration struct {
 
 	// Node selectors
 	NodeSelector string `json:"nodeSelector,omitempty"`
+
+	// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
+	MaxNoOfPodsToEvictPerNode int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
 }
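
Note: the v1alpha1 field carries a json tag with omitempty, so an unset limit simply disappears from serialized configuration. A quick standalone check with encoding/json (the struct here is a pared-down copy for illustration, not the PR's type):

package main

import (
	"encoding/json"
	"fmt"
)

type deschedulerConfiguration struct {
	NodeSelector              string `json:"nodeSelector,omitempty"`
	MaxNoOfPodsToEvictPerNode int    `json:"maxNoOfPodsToEvictPerNode,omitempty"`
}

func main() {
	unset, _ := json.Marshal(deschedulerConfiguration{NodeSelector: "role=infra"})
	set, _ := json.Marshal(deschedulerConfiguration{NodeSelector: "role=infra", MaxNoOfPodsToEvictPerNode: 5})
	fmt.Println(string(unset)) // {"nodeSelector":"role=infra"}
	fmt.Println(string(set))   // {"nodeSelector":"role=infra","maxNoOfPodsToEvictPerNode":5}
}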
@@ -47,6 +47,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_DeschedulerConfiguration(in *DeschedulerConfiguration, out *componentconfig.DeschedulerConfiguration, s conversion.Scope) error {
 	out.PolicyConfigFile = in.PolicyConfigFile
 	out.DryRun = in.DryRun
 	out.NodeSelector = in.NodeSelector
+	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	return nil
 }

@@ -61,6 +62,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
 	out.PolicyConfigFile = in.PolicyConfigFile
 	out.DryRun = in.DryRun
 	out.NodeSelector = in.NodeSelector
+	out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
 	return nil
 }
@@ -60,10 +60,11 @@ func Run(rs *options.DeschedulerServer) error {
 		return nil
 	}
 
-	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
-	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
-	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes)
-	strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes)
+	nodePodCount := strategies.InitializeNodePodCount(nodes)
+	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
+	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
+	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
+	strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
 
 	return nil
 }
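
Note: the same nodePodCount map is handed to all four strategies, so the per-node limit is a shared budget for the whole descheduling run, not a per-strategy quota: pods evicted by RemoveDuplicates count against what LowNodeUtilization may still evict from that node. A toy sketch of that sharing (names illustrative, not PR code):

package main

import "fmt"

func main() {
	// counts plays the role of nodePodEvictedCount: one shared map, mutated by
	// every strategy in turn.
	counts := map[string]int{"node-a": 0}
	const maxPodsToEvict = 2

	tryEvict := func(node string) bool {
		if counts[node]+1 > maxPodsToEvict {
			return false // this node's budget is already spent
		}
		counts[node]++
		return true
	}

	fmt.Println(tryEvict("node-a")) // true:  first strategy evicts
	fmt.Println(tryEvict("node-a")) // true:  first strategy evicts again
	fmt.Println(tryEvict("node-a")) // false: a later strategy is refused
}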
@@ -17,9 +17,10 @@ limitations under the License.
 package strategies
 
 import (
-	"github.com/golang/glog"
 	"strings"
 
+	"github.com/golang/glog"
+
 	"k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"

@@ -35,15 +36,15 @@ type DuplicatePodsMap map[string][]*v1.Pod
 // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
 // A pod is said to be a duplicate of another if both of them are from the same creator and kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storage.
-func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
+func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
 	if !strategy.Enabled {
 		return
 	}
-	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
+	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodepodCount, ds.MaxNoOfPodsToEvictPerNode)
 }
 
 // deleteDuplicatePods evicts duplicate pods from nodes and returns the count of evicted pods.
-func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
+func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
 	podsEvicted := 0
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v", node.Name)

@@ -53,16 +54,20 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
 			glog.V(1).Infof("%#v", creator)
 			// i = 0 does not evict the first pod
 			for i := 1; i < len(pods); i++ {
+				if nodepodCount[node]+1 > maxPodsToEvict {
+					break
+				}
 				success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
 				if !success {
 					glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
 				} else {
-					podsEvicted++
+					nodepodCount[node]++
 					glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
 				}
 			}
 		}
+		podsEvicted += nodepodCount[node]
 	}
 	return podsEvicted
 }
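
Note: the guard added above runs before every eviction attempt and compares nodepodCount[node]+1 against maxPodsToEvict. One consequence worth spelling out: with the flag left at its zero default, 0+1 > 0 holds immediately, so nothing is evicted; the maxPodsToEvict: 0 test cases later in this PR expect exactly zero evictions. A compact sketch of the loop shape (hypothetical helper, not PR code):

package main

import "fmt"

// evictUpTo mimics the strategies' loop: stop as soon as the next eviction
// would exceed the per-node budget.
func evictUpTo(duplicates []string, alreadyEvicted, maxPodsToEvict int) int {
	evicted := alreadyEvicted
	for range duplicates {
		if evicted+1 > maxPodsToEvict {
			break
		}
		evicted++
	}
	return evicted
}

func main() {
	dups := []string{"p1", "p2", "p3"}
	fmt.Println(evictUpTo(dups, 0, 2)) // 2: capped below the duplicate count
	fmt.Println(evictUpTo(dups, 0, 0)) // 0: the zero default evicts nothing
}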
@@ -37,11 +37,15 @@ func TestFindDuplicatePods(t *testing.T) {
 	p5 := test.BuildTestPod("p5", 100, 0, node.Name)
 	p6 := test.BuildTestPod("p6", 100, 0, node.Name)
 	p7 := test.BuildTestPod("p7", 100, 0, node.Name)
+	p8 := test.BuildTestPod("p8", 100, 0, node.Name)
+	p9 := test.BuildTestPod("p9", 100, 0, node.Name)
 
 	// All the following pods except for one will be evicted.
 	p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 	p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 	p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+	p8.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
+	p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 
 	// The following 4 pods won't get evicted.
 	// A daemonset.

@@ -66,12 +70,14 @@ func TestFindDuplicatePods(t *testing.T) {
 	expectedEvictedPodCount := 2
 	fakeClient := &fake.Clientset{}
 	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-		return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7}}, nil
+		return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9}}, nil
 	})
 	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, node, nil
 	})
-	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false)
+	npe := nodePodEvictedCount{}
+	npe[node] = 0
+	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2)
 	if podsEvicted != expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted")
 	}
@@ -43,7 +43,7 @@ type NodeUsageMap struct {
 }
 type NodePodsMap map[*v1.Node][]*v1.Pod
 
-func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
+func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount) {
 	if !strategy.Enabled {
 		return
 	}

@@ -90,7 +90,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
 		targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
 	glog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
 
-	totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
+	totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
 	glog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
 
 }

@@ -151,7 +151,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThresholds api.ResourceThresholds) ([]NodeUsageMap, []NodeUsageMap) {
 	return lowNodes, targetNodes
 }
 
-func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
+func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool, maxPodsToEvict int, nodepodCount nodePodEvictedCount) int {
 	podsEvicted := 0
 
 	SortNodesByUsage(targetNodes)

@@ -189,17 +189,17 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
 			nodeCapacity = node.node.Status.Allocatable
 		}
 		glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
-		currentPodsEvicted := podsEvicted
+		currentPodsEvicted := nodepodCount[node.node]
 
 		// evict best effort pods
-		evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
+		evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
 		// evict burstable pods
-		evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
+		evictPods(node.bPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
 		// evict guaranteed pods
-		evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
-
-		podsEvictedFromNode := podsEvicted - currentPodsEvicted
-		glog.V(1).Infof("%v pods evicted from node %#v with usage %v", podsEvictedFromNode, node.node.Name, node.usage)
+		evictPods(node.gPods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
+		nodepodCount[node.node] = currentPodsEvicted
+		podsEvicted = podsEvicted + nodepodCount[node.node]
+		glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
 	}
 	return podsEvicted
 }

@@ -214,10 +214,13 @@ func evictPods(inputPods []*v1.Pod,
 	totalCpu *float64,
 	totalMem *float64,
 	podsEvicted *int,
-	dryRun bool) {
+	dryRun bool, maxPodsToEvict int) {
 	if IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) && (*totalPods > 0 || *totalCpu > 0 || *totalMem > 0) {
 		onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapacity.Pods().Value()))
 		for _, pod := range inputPods {
+			if *podsEvicted+1 > maxPodsToEvict {
+				break
+			}
 			cUsage := helper.GetResourceRequest(pod, v1.ResourceCPU)
 			mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
 			success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
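
Note: the pivotal change in this file is which counter evictPods advances. It previously incremented the run-wide &podsEvicted; it now increments &currentPodsEvicted, seeded from nodepodCount[node.node] and written back after the three QoS passes, so the cap binds per node instead of per run. A reduced sketch of that pointer threading (illustrative names, not PR code):

package main

import "fmt"

// evictSome advances the per-node counter through a pointer, mirroring the
// evictPods(..., &currentPodsEvicted, dryRun, maxPodsToEvict) call shape.
func evictSome(candidates int, perNodeEvicted *int, maxPodsToEvict int) {
	for i := 0; i < candidates; i++ {
		if *perNodeEvicted+1 > maxPodsToEvict {
			break
		}
		*perNodeEvicted++
	}
}

func main() {
	nodeCount := map[string]int{"node-a": 1} // one eviction charged earlier
	current := nodeCount["node-a"]

	// Best-effort, burstable and guaranteed passes share the same counter.
	evictSome(2, &current, 3)
	evictSome(2, &current, 3)
	nodeCount["node-a"] = current

	fmt.Println(nodeCount["node-a"]) // 3: held at maxPodsToEvict
}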
@@ -109,15 +109,19 @@ func TestLowNodeUtilization(t *testing.T) {
 		}
 		return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
 	})
-	expectedPodsEvicted := 4
+	expectedPodsEvicted := 3
 	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2, n3})
 	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds)
 	if len(lowNodes) != 1 {
 		t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
 	}
-	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
+	npe := nodePodEvictedCount{}
+	npe[n1] = 0
+	npe[n2] = 0
+	npe[n3] = 0
+	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false, 3, npe)
 	if expectedPodsEvicted != podsEvicted {
-		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
+		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted, podsEvicted)
 	}
 
 }
@@ -26,12 +26,12 @@ import (
 	"k8s.io/api/core/v1"
 )
 
-func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
-	evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes)
+func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
+	evictionCount := removePodsViolatingNodeAffinityCount(ds, strategy, evictionPolicyGroupVersion, nodes, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
 	glog.V(1).Infof("Evicted %v pods", evictionCount)
 }
 
-func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) int {
+func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node, nodepodCount nodePodEvictedCount, maxPodsToEvict int) int {
 	evictedPodCount := 0
 	if !strategy.Enabled {
 		return evictedPodCount

@@ -51,15 +51,19 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) int {
 			}
 
 			for _, pod := range pods {
+				if nodepodCount[node]+1 > maxPodsToEvict {
+					break
+				}
 				if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 
 					if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
 						glog.V(1).Infof("Evicting pod: %v", pod.Name)
 						evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, false)
-						evictedPodCount++
+						nodepodCount[node]++
 					}
 				}
 			}
+			evictedPodCount += nodepodCount[node]
 		}
 	default:
 		glog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
@@ -92,6 +92,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 		pods                    []v1.Pod
 		strategy                api.DeschedulerStrategy
 		expectedEvictedPodCount int
+		npe                     nodePodEvictedCount
+		maxPodsToEvict          int
 	}{
 		{
 			description: "Strategy disabled, should not evict any pods",

@@ -104,8 +106,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				},
 			},
 			expectedEvictedPodCount: 0,
-			pods:  addPodsToNode(nodeWithoutLabels),
-			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+			pods:           addPodsToNode(nodeWithoutLabels),
+			nodes:          []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+			maxPodsToEvict: 0,
 		},
 		{
 			description: "Invalid strategy type, should not evict any pods",

@@ -118,15 +122,19 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 				},
 			},
 			expectedEvictedPodCount: 0,
-			pods:  addPodsToNode(nodeWithoutLabels),
-			nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+			pods:           addPodsToNode(nodeWithoutLabels),
+			nodes:          []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+			npe:            nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+			maxPodsToEvict: 0,
 		},
 		{
 			description: "Pod is correctly scheduled on node, no eviction expected",
 			strategy:    requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			expectedEvictedPodCount: 0,
-			pods:  addPodsToNode(nodeWithLabels),
-			nodes: []*v1.Node{nodeWithLabels},
+			pods:           addPodsToNode(nodeWithLabels),
+			nodes:          []*v1.Node{nodeWithLabels},
+			npe:            nodePodEvictedCount{nodeWithLabels: 0},
+			maxPodsToEvict: 0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",

@@ -134,6 +142,17 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods:                    addPodsToNode(nodeWithoutLabels),
 			nodes:                   []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+			npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+			maxPodsToEvict:          1,
+		},
+		{
+			description:             "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvict set to 0, should not be evicted",
+			expectedEvictedPodCount: 0,
+			strategy:                requiredDuringSchedulingIgnoredDuringExecutionStrategy,
+			pods:                    addPodsToNode(nodeWithoutLabels),
+			nodes:                   []*v1.Node{nodeWithoutLabels, nodeWithLabels},
+			npe:                     nodePodEvictedCount{nodeWithoutLabels: 0, nodeWithLabels: 0},
+			maxPodsToEvict:          0,
 		},
 		{
 			description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",

@@ -141,6 +160,8 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
 			pods:     addPodsToNode(nodeWithoutLabels),
 			nodes:    []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
+			npe:      nodePodEvictedCount{nodeWithoutLabels: 0, unschedulableNodeWithLabels: 0},
+			maxPodsToEvict: 0,
 		},
 	}
 

@@ -155,7 +176,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			Client: fakeClient,
 		}
 
-		actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes)
+		actualEvictedPodCount := removePodsViolatingNodeAffinityCount(&ds, tc.strategy, "v1", tc.nodes, tc.npe, tc.maxPodsToEvict)
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
 		}
@@ -31,15 +31,15 @@ import (
 )
 
 // RemovePodsViolatingInterPodAntiAffinity with elimination strategy
-func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
+func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node, nodePodCount nodePodEvictedCount) {
 	if !strategy.Enabled {
 		return
 	}
-	removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun)
+	removePodsWithAffinityRules(ds.Client, policyGroupVersion, nodes, ds.DryRun, nodePodCount, ds.MaxNoOfPodsToEvictPerNode)
 }
 
 // removePodsWithAffinityRules evicts pods on the node which have pod anti-affinity rules.
-func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
+func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int) int {
 	podsEvicted := 0
 	for _, node := range nodes {
 		glog.V(1).Infof("Processing node: %#v\n", node.Name)

@@ -49,12 +49,15 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
 		}
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
+			if nodePodCount[node]+1 > maxPodsToEvict {
+				break
+			}
 			if checkPodsWithAntiAffinityExist(pods[i], pods) {
 				success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
 				if !success {
 					glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
 				} else {
-					podsEvicted++
+					nodePodCount[node]++
 					glog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
 					// Since the current pod is evicted all other pods which have anti-affinity with this
 					// pod need not be evicted.

@@ -65,6 +68,7 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
 				}
 			}
 		}
+		podsEvicted += nodePodCount[node]
 	}
 	return podsEvicted
 }
@@ -79,8 +79,15 @@ func TestPodAntiAffinity(t *testing.T) {
 	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, node, nil
 	})
-	expectedEvictedPodCount := 1
-	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false)
+	npe := nodePodEvictedCount{}
+	npe[node] = 0
+	expectedEvictedPodCount := 0
+	podsEvicted := removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 0)
 	if podsEvicted != expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted")
 	}
+	expectedEvictedPodCount = 1
+	podsEvicted = removePodsWithAffinityRules(fakeClient, "v1", []*v1.Node{node}, false, npe, 1)
+	if podsEvicted != expectedEvictedPodCount {
+		t.Errorf("Unexpected no of pods evicted")
+	}
pkg/descheduler/strategies/util.go (new file, +37)

@@ -0,0 +1,37 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strategies
+
+import (
+	"k8s.io/api/core/v1"
+)
+
+// This file contains the data structures, types and functions needed by all the strategies so that we don't have
+// to compute them again in each strategy.
+
+// nodePodEvictedCount keeps count of pods evicted on node. This is used in conjunction with strategies to enforce the per-node eviction limit.
+type nodePodEvictedCount map[*v1.Node]int
+
+// InitializeNodePodCount initializes the nodePodCount.
+func InitializeNodePodCount(nodeList []*v1.Node) nodePodEvictedCount {
+	var nodePodCount = make(nodePodEvictedCount)
+	for _, node := range nodeList {
+		// Initialize podsEvicted till now with 0.
+		nodePodCount[node] = 0
+	}
+	return nodePodCount
+}
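
Note: nodePodEvictedCount is keyed by *v1.Node pointers, so counts are tied to pointer identity: callers must consult the map with the very same node objects they passed to InitializeNodePodCount. A minimal usage sketch with fabricated nodes (the lowercase initializeNodePodCount below is a local copy for the example, since the type is unexported):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type nodePodEvictedCount map[*v1.Node]int

func initializeNodePodCount(nodeList []*v1.Node) nodePodEvictedCount {
	m := make(nodePodEvictedCount)
	for _, node := range nodeList {
		m[node] = 0
	}
	return m
}

func main() {
	n1 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	n2 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-2"}}
	nodes := []*v1.Node{n1, n2}

	counts := initializeNodePodCount(nodes)
	counts[n1]++ // charge one eviction to node-1

	fmt.Println(counts[n1], counts[n2]) // 1 0
}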
@@ -112,7 +112,8 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
 	nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}
 	lowNodeUtilizationStrategy := deschedulerapi.DeschedulerStrategy{Enabled: true, Params: nodeUtilizationStrategyParams}
 	ds := &options.DeschedulerServer{Client: clientset}
-	strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes)
+	nodePodCount := strategies.InitializeNodePodCount(nodes)
+	strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, evictionPolicyGroupVersion, nodes, nodePodCount)
 	time.Sleep(10 * time.Second)
 
 	return