Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
Split pod evictor and evictor filter
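This change splits responsibilities that previously lived together in PodEvictor: the evictor keeps tracking eviction limits and performing evictions, while the evictability checks (failed bare pods, system-critical pods, local storage, PVCs, node fit, priority threshold, label selector) move into a new EvictorFilter type with its own constructor, and every strategy function now receives both objects. The following is a minimal, hedged sketch of the resulting wiring, not the exact RunDeschedulerStrategies code; the wrapper name runOneStrategy and its parameters are illustrative stand-ins for values the descheduler builds elsewhere, and the sketch assumes the descheduler imports used throughout this diff (api, evictions, podutil, strategies).

// Sketch only: how one strategy is driven after the split.
func runOneStrategy(
	ctx context.Context,
	client clientset.Interface,
	strategy api.DeschedulerStrategy,
	nodes []*v1.Node,
	podEvictor *evictions.PodEvictor,
	getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
	nodeFit bool,
	thresholdPriority int32,
) {
	// The filter now owns all evictability checks that used to be PodEvictor fields.
	evictorFilter := evictions.NewEvictorFilter(
		nodes,
		getPodsAssignedToNode,
		false, // evictLocalStoragePods
		false, // evictSystemCriticalPods
		false, // ignorePvcPods
		false, // evictFailedBarePods
		evictions.WithNodeFit(nodeFit),
		evictions.WithPriorityThreshold(thresholdPriority),
	)

	// Strategies take the filter next to the evictor: the evictor performs and
	// limits evictions, the filter decides which pods are candidates.
	strategies.PodLifeTime(ctx, client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
}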
@@ -45,6 +45,7 @@ import (
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
"sigs.k8s.io/descheduler/pkg/utils"
)

func Run(ctx context.Context, rs *options.DeschedulerServer) error {
@@ -87,7 +88,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return runFn()
}

type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)

func cachedClient(
realClient clientset.Interface,
@@ -283,18 +284,25 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
getPodsAssignedToNode,
evictLocalStoragePods,
evictSystemCriticalPods,
ignorePvcPods,
evictBarePods,
!rs.DisableMetrics,
)

for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled {
f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
nodeFit := false
if name != "PodLifeTime" {
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, rs.Client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
continue
}
evictorFilter := evictions.NewEvictorFilter(nodes, getPodsAssignedToNode, evictLocalStoragePods, evictSystemCriticalPods, ignorePvcPods, evictBarePods, evictions.WithNodeFit(nodeFit), evictions.WithPriorityThreshold(thresholdPriority))
f(ctx, rs.Client, strategy, nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)

@@ -51,17 +51,12 @@ type namespacePodEvictCount map[string]uint
type PodEvictor struct {
client clientset.Interface
nodes []*v1.Node
nodeIndexer podutil.GetPodsAssignedToNodeFunc
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
ignorePvcPods bool
metricsEnabled bool
}

@@ -72,11 +67,6 @@ func NewPodEvictor(
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
nodeIndexer podutil.GetPodsAssignedToNodeFunc,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
evictFailedBarePods bool,
metricsEnabled bool,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
@@ -89,17 +79,12 @@ func NewPodEvictor(
return &PodEvictor{
client: client,
nodes: nodes,
nodeIndexer: nodeIndexer,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
evictLocalStoragePods: evictLocalStoragePods,
evictSystemCriticalPods: evictSystemCriticalPods,
evictFailedBarePods: evictFailedBarePods,
ignorePvcPods: ignorePvcPods,
metricsEnabled: metricsEnabled,
}
}
@@ -230,21 +215,26 @@ func WithLabelSelector(labelSelector labels.Selector) func(opts *Options) {

type constraint func(pod *v1.Pod) error

type evictable struct {
type EvictorFilter struct {
constraints []constraint
}

// Evictable provides an implementation of IsEvictable(IsEvictable(pod *v1.Pod) bool).
// The method accepts a list of options which allow to extend constraints
// which decides when a pod is considered evictable.
func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
func NewEvictorFilter(
nodes []*v1.Node,
nodeIndexer podutil.GetPodsAssignedToNodeFunc,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
evictFailedBarePods bool,
opts ...func(opts *Options),
) *EvictorFilter {
options := &Options{}
for _, opt := range opts {
opt(options)
}

ev := &evictable{}
if pe.evictFailedBarePods {
ev := &EvictorFilter{}
if evictFailedBarePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Enable evictFailedBarePods to evict bare pods in failed phase
@@ -263,7 +253,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
return nil
})
}
if !pe.evictSystemCriticalPods {
if !evictSystemCriticalPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
// Moved from IsEvictable function to allow for disabling
if utils.IsCriticalPriorityPod(pod) {
@@ -281,7 +271,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
})
}
}
if !pe.evictLocalStoragePods {
if !evictLocalStoragePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithLocalStorage(pod) {
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
@@ -289,7 +279,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
return nil
})
}
if pe.ignorePvcPods {
if ignorePvcPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
@@ -299,7 +289,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
}
if options.nodeFit {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
if !nodeutil.PodFitsAnyOtherNode(nodeIndexer, pod, nodes) {
return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
}
return nil
@@ -318,7 +308,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
}

// IsEvictable decides when a pod is evictable
func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
func (ef *EvictorFilter) Filter(pod *v1.Pod) bool {
checkErrs := []error{}

ownerRefList := podutil.OwnerRef(pod)
@@ -338,7 +328,7 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
}

for _, c := range ev.constraints {
for _, c := range ef.constraints {
if err := c(pod); err != nil {
checkErrs = append(checkErrs, err)
}

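Evictability checks are now reachable without a PodEvictor: what used to be podEvictor.Evictable(opts...).IsEvictable(pod) becomes a standalone NewEvictorFilter(...).Filter(pod). A small hedged sketch of the new call site, reusing the descheduler imports shown above; the helper canEvict and its arguments are illustrative, not part of this change.

// Illustrative only: build a filter directly and ask it about one pod.
func canEvict(pod *v1.Pod, nodes []*v1.Node, indexer podutil.GetPodsAssignedToNodeFunc) bool {
	filter := evictions.NewEvictorFilter(
		nodes,
		indexer,
		false, // evictLocalStoragePods: pods with local storage stay put
		false, // evictSystemCriticalPods: system-critical pods stay put
		true,  // ignorePvcPods: skip pods with PVCs
		false, // evictFailedBarePods
		evictions.WithNodeFit(true), // require that the pod fits on another node
	)
	return filter.Filter(pod)
}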
@@ -21,7 +21,6 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -639,19 +638,6 @@ func TestIsEvictable(t *testing.T) {
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

podEvictor := &PodEvictor{
client: fakeClient,
nodes: nodes,
nodeIndexer: getPodsAssignedToNode,
policyGroupVersion: policyv1.SchemeGroupVersion.String(),
dryRun: false,
maxPodsToEvictPerNode: nil,
maxPodsToEvictPerNamespace: nil,
evictLocalStoragePods: test.evictLocalStoragePods,
evictSystemCriticalPods: test.evictSystemCriticalPods,
evictFailedBarePods: test.evictFailedBarePods,
}

var opts []func(opts *Options)
if test.priorityThreshold != nil {
opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
@@ -659,9 +645,18 @@ func TestIsEvictable(t *testing.T) {
if test.nodeFit {
opts = append(opts, WithNodeFit(true))
}
evictable := podEvictor.Evictable(opts...)

result := evictable.IsEvictable(test.pods[0])
evictorFilter := NewEvictorFilter(
nodes,
getPodsAssignedToNode,
test.evictLocalStoragePods,
test.evictSystemCriticalPods,
false,
test.evictFailedBarePods,
opts...,
)

result := evictorFilter.Filter(test.pods[0])
if result != test.result {
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
}

@@ -66,17 +66,13 @@ func RemoveDuplicatePods(
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
evictorFilter *evictions.EvictorFilter,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

var includedNamespaces, excludedNamespaces sets.String
if strategy.Params != nil && strategy.Params.Namespaces != nil {
@@ -84,20 +80,13 @@ func RemoveDuplicatePods(
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

duplicatePods := make(map[podOwner]map[string][]*v1.Pod)
ownerKeyOccurence := make(map[podOwner]int32)
nodeCount := 0
nodeMap := make(map[string]*v1.Node)

podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithFilter(evictorFilter.Filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()

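The same substitution repeats across the strategies below: where a strategy used to call podEvictor.Evictable(...) and plug IsEvictable into its pod-listing options, it now plugs the injected evictorFilter.Filter. A hedged sketch of that recurring pattern follows; buildPodFilter is an illustrative helper, not verbatim strategy code, and it assumes the podutil and sets imports already used in this diff.

// Recurring pattern after the split: the injected filter feeds the pod lister.
func buildPodFilter(evictorFilter *evictions.EvictorFilter, includedNamespaces, excludedNamespaces sets.String) (podutil.FilterFunc, error) {
	return podutil.NewOptions().
		WithFilter(evictorFilter.Filter).      // evictability now comes from the injected filter
		WithNamespaces(includedNamespaces).    // namespaces to include, from strategy params
		WithoutNamespaces(excludedNamespaces). // namespaces to exclude, from strategy params
		BuildFilterFunc()
}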
@@ -319,16 +319,26 @@ func TestFindDuplicatePods(t *testing.T) {
false,
nil,
nil,
testCase.nodes,
false,
)

nodeFit := false
if testCase.strategy.Params != nil {
nodeFit = testCase.strategy.Params.NodeFit
}

evictorFilter := evictions.NewEvictorFilter(
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
evictions.WithNodeFit(nodeFit),
)

RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -748,15 +758,19 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
nil,
nil,
testCase.nodes,
getPodsAssignedToNode,
false,
)

evictorFilter := evictions.NewEvictorFilter(
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
)

RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)

@@ -33,6 +33,7 @@ func RemoveFailedPods(
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
evictorFilter *evictions.EvictorFilter,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
@@ -41,19 +42,13 @@ func RemoveFailedPods(
return
}

evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.LabelSelector),
)

var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
labelSelector = strategy.Params.LabelSelector
}

podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithFilter(evictorFilter.Filter).
WithNamespaces(strategyParams.IncludedNamespaces).
WithoutNamespaces(strategyParams.ExcludedNamespaces).
WithLabelSelector(labelSelector).

@@ -46,7 +46,7 @@ func TestRemoveFailedPods(t *testing.T) {
}{
{
description: "default empty strategy, 0 failures, 0 evictions",
strategy: api.DeschedulerStrategy{},
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: false}},
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
@@ -274,16 +274,21 @@ func TestRemoveFailedPods(t *testing.T) {
false,
nil,
nil,
tc.nodes,
false,
)

evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
evictions.WithNodeFit(tc.strategy.Params.NodeFit),
)

RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)

@@ -29,7 +29,6 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) error {
@@ -48,16 +47,11 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
}

// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil {
@@ -65,13 +59,6 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
@@ -94,7 +81,7 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
node.Name,
getPodsAssignedToNode,
podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
return evictable.IsEvictable(pod) &&
return evictorFilter.Filter(pod) &&
!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
}),

@@ -221,16 +221,26 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
)

nodeFit := false
if tc.strategy.Params != nil {
nodeFit = tc.strategy.Params.NodeFit
}

evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
evictions.WithNodeFit(nodeFit),
)

RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)

@@ -49,7 +49,7 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
}

// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
return
@@ -68,21 +68,8 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
labelSelector = strategy.Params.LabelSelector
}

thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithFilter(evictorFilter.Filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(labelSelector).

@@ -338,11 +338,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
getPodsAssignedToNode,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
false,
false,
)

@@ -354,7 +349,17 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
},
}

RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
false,
evictions.WithNodeFit(tc.nodeFit),
)

RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)

@@ -29,28 +29,16 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its strategy.
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateNodeUtilizationParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
return
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
if err := validateHighUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
@@ -108,8 +96,6 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
return
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

// stop if the total available usage has dropped to zero - no more pods can be scheduled
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
for name := range totalAvailableUsage {
@@ -129,7 +115,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
sourceNodes,
highNodes,
podEvictor,
evictable.IsEvictable,
evictorFilter.Filter,
resourceNames,
"HighNodeUtilization",
continueEvictionCond)

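Both node-utilization strategies hand the evictability check to their shared eviction loop as a plain function value (evictorFilter.Filter) rather than as the EvictorFilter object, so any func(*v1.Pod) bool satisfies that parameter. A hedged sketch of that shape; evictCandidates is an illustrative name, not the real helper from this change.

// Illustrative: the eviction loop only needs a predicate, not the filter type.
func evictCandidates(pods []*v1.Pod, isEvictable func(pod *v1.Pod) bool) []*v1.Pod {
	var candidates []*v1.Pod
	for _, pod := range pods {
		if isEvictable(pod) {
			candidates = append(candidates, pod)
		}
	}
	return candidates
}

// Called with the filter's method value, e.g. evictCandidates(pods, evictorFilter.Filter).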
@@ -507,11 +507,6 @@ func TestHighNodeUtilization(t *testing.T) {
nil,
nil,
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
)

@@ -524,7 +519,18 @@ func TestHighNodeUtilization(t *testing.T) {
NodeFit: true,
},
}
HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)

evictorFilter := evictions.NewEvictorFilter(
testCase.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(strategy.Params.NodeFit),
)

HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)

podsEvicted := podEvictor.TotalEvicted()
if testCase.expectedPodsEvicted != podsEvicted {
@@ -713,15 +719,19 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
&item.evictionsExpected,
nil,
item.nodes,
getPodsAssignedToNode,
false,
)

evictorFilter := evictions.NewEvictorFilter(
item.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
)

HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)

if item.evictionsExpected != podEvictor.TotalEvicted() {
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())

@@ -29,27 +29,17 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
if err := validateNodeUtilizationParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
return
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}
useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
@@ -152,8 +142,6 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
return
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
@@ -176,7 +164,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
sourceNodes,
lowNodes,
podEvictor,
evictable.IsEvictable,
evictorFilter.Filter,
resourceNames,
"LowNodeUtilization",
continueEvictionCond)

@@ -772,11 +772,6 @@ func TestLowNodeUtilization(t *testing.T) {
nil,
nil,
test.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
)

@@ -791,7 +786,18 @@ func TestLowNodeUtilization(t *testing.T) {
NodeFit: true,
},
}
LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)

evictorFilter := evictions.NewEvictorFilter(
test.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(strategy.Params.NodeFit),
)

LowNodeUtilization(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)

podsEvicted := podEvictor.TotalEvicted()
if test.expectedPodsEvicted != podsEvicted {
@@ -1087,15 +1093,19 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
&item.evictionsExpected,
nil,
item.nodes,
getPodsAssignedToNode,
false,
)

evictorFilter := evictions.NewEvictorFilter(
item.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
)

LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
LowNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)

if item.evictionsExpected != podEvictor.TotalEvicted() {
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())

@@ -49,7 +49,7 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
}

// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
return
@@ -65,19 +65,6 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
labelSelector = strategy.Params.LabelSelector
}

thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
@@ -98,7 +85,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
podutil.SortPodsBasedOnPriorityLowToHigh(pods)
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if checkPodsWithAntiAffinityExist(pods[i], pods) && evictable.IsEvictable(pods[i]) {
if checkPodsWithAntiAffinityExist(pods[i], pods) && evictorFilter.Filter(pods[i]) {
success, err := podEvictor.EvictPod(ctx, pods[i], node, "InterPodAntiAffinity")
if err != nil {
klog.ErrorS(err, "Error evicting pod")

@@ -218,11 +218,6 @@ func TestPodAntiAffinity(t *testing.T) {
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
@@ -231,7 +226,17 @@ func TestPodAntiAffinity(t *testing.T) {
},
}

RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
evictorFilter := evictions.NewEvictorFilter(
test.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
evictions.WithNodeFit(test.nodeFit),
)

RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)

@@ -29,7 +29,6 @@ import (
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

func validatePodLifeTimeParams(params *api.StrategyParameters) error {
@@ -57,32 +56,24 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
}

// PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validatePodLifeTimeParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid PodLifeTime parameters")
return
}

thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))

filter := evictable.IsEvictable
filter := evictorFilter.Filter
if strategy.Params.PodLifeTime.PodStatusPhases != nil {
filter = func(pod *v1.Pod) bool {
for _, phase := range strategy.Params.PodLifeTime.PodStatusPhases {
if string(pod.Status.Phase) == phase {
return evictable.IsEvictable(pod)
return evictorFilter.Filter(pod)
}
}
return false

@@ -339,16 +339,20 @@ func TestPodLifeTime(t *testing.T) {
false,
tc.maxPodsToEvictPerNode,
tc.maxPodsToEvictPerNamespace,
tc.nodes,
false,
)

evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
tc.ignorePvcPods,
false,
false,
)

PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)

@@ -28,7 +28,6 @@ import (
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)

func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameters) error {
@@ -50,33 +49,20 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictorFilter *evictions.EvictorFilter, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
return
}

thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Failed to get threshold priority from strategy's params")
return
}

var includedNamespaces, excludedNamespaces sets.String
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
}

nodeFit := false
if strategy.Params != nil {
nodeFit = strategy.Params.NodeFit
}

evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))

podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithFilter(evictorFilter.Filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).

@@ -249,16 +249,21 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
)

evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
evictions.WithNodeFit(tc.strategy.Params.NodeFit),
)

RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)

@@ -46,12 +46,14 @@ type topology struct {
pods []*v1.Pod
}

// nolint: gocyclo
func RemovePodsViolatingTopologySpreadConstraint(
ctx context.Context,
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
evictorFilter *evictions.EvictorFilter,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
@@ -60,11 +62,13 @@ func RemovePodsViolatingTopologySpreadConstraint(
return
}

evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.LabelSelector),
)
isEvictable := evictorFilter.Filter

if strategyParams.LabelSelector != nil && !strategyParams.LabelSelector.Empty() {
isEvictable = podutil.WrapFilterFuncs(isEvictable, func(pod *v1.Pod) bool {
return strategyParams.LabelSelector.Matches(labels.Set(pod.Labels))
})
}

nodeMap := make(map[string]*v1.Node, len(nodes))
for _, node := range nodes {
@@ -168,12 +172,12 @@ func RemovePodsViolatingTopologySpreadConstraint(
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
continue
}
balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodes)
balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictorFilter.Filter, nodes)
}
}

for pod := range podsForEviction {
if !evictable.IsEvictable(pod) {
if !isEvictable(pod) {
continue
}
if _, err := podEvictor.EvictPod(ctx, pod, nodeMap[pod.Spec.NodeName], "PodTopologySpread"); err != nil {
@@ -234,6 +238,7 @@ func balanceDomains(

idealAvg := sumPods / float64(len(constraintTopologies))
sortedDomains := sortDomains(constraintTopologies, isEvictable)

// i is the index for belowOrEqualAvg
// j is the index for aboveAvg
i := 0

@@ -970,15 +970,26 @@ func TestTopologySpreadConstraint(t *testing.T) {
false,
nil,
nil,
tc.nodes,
false,
)

nodeFit := false
if tc.strategy.Params != nil {
nodeFit = tc.strategy.Params.NodeFit
}

evictorFilter := evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
false,
false,
false,
false,
false,
evictions.WithNodeFit(nodeFit),
)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)

RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, evictorFilter, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
