Remove options.DeschedulerServer from all strategies
@@ -19,6 +19,8 @@ package descheduler
import (
"fmt"

"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"k8s.io/apimachinery/pkg/util/wait"
@@ -56,6 +58,8 @@ func Run(rs *options.DeschedulerServer) error {
return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
}

type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)

func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
@@ -63,6 +67,15 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)

strategyFuncs := map[string]strategyFunction{
"RemoveDuplicates": strategies.RemoveDuplicatePods,
"LowNodeUtilization": strategies.LowNodeUtilization,
"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
}

wait.Until(func() {
nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
if err != nil {
@@ -85,12 +98,11 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
nodes,
)

strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], nodes, podEvictor)
strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], nodes, podEvictor)
strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], nodes, podEvictor)
strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], nodes, podEvictor)
strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], nodes, podEvictor)
strategies.RemovePodsHavingTooManyRestarts(rs, deschedulerPolicy.Strategies["RemovePodsHavingTooManyRestarts"], nodes, podEvictor)
for name, f := range strategyFuncs {
if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
}
}

// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
if rs.DeschedulingInterval.Seconds() == 0 {
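The hunks above replace the hard-coded per-strategy calls in RunDeschedulerStrategies with a single strategyFunction type and a name-keyed dispatch map, with the Enabled check moved into the loop. A minimal, self-contained sketch of that dispatch pattern (simplified stand-in types and hypothetical policy data, not the descheduler's real types) looks like this:

```go
package main

import "fmt"

// strategy is a stand-in for api.DeschedulerStrategy; only the Enabled flag matters here.
type strategy struct{ Enabled bool }

// strategyFunction mirrors the new common signature: every strategy is callable the same way.
type strategyFunction func(name string, s strategy)

func main() {
	run := func(name string, s strategy) { fmt.Println("running", name) }

	// Name-keyed dispatch map, mirroring strategyFuncs in RunDeschedulerStrategies.
	strategyFuncs := map[string]strategyFunction{
		"RemoveDuplicates":   run,
		"LowNodeUtilization": run,
	}

	// Stand-in for deschedulerPolicy.Strategies: only enabled strategies run.
	policy := map[string]strategy{
		"RemoveDuplicates":   {Enabled: true},
		"LowNodeUtilization": {Enabled: false},
	}

	for name, f := range strategyFuncs {
		if s := policy[name]; s.Enabled {
			f(name, s)
		}
	}
}
```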
@@ -23,35 +23,24 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

//type creator string
type DuplicatePodsMap map[string][]*v1.Pod

// RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
// A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if !strategy.Enabled {
return
}
deleteDuplicatePods(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
}

// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
func deleteDuplicatePods(
func RemoveDuplicatePods(
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
evictLocalStoragePods bool,
podEvictor *evictions.PodEvictor,
) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v", node.Name)
dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
for creator, pods := range dpm {
if len(pods) > 1 {
klog.V(1).Infof("%#v", creator)
@@ -66,18 +55,17 @@ func deleteDuplicatePods(
}
}

// ListDuplicatePodsOnANode lists duplicate pods on a given node.
func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
//type creator string
type duplicatePodsMap map[string][]*v1.Pod

// listDuplicatePodsOnANode lists duplicate pods on a given node.
func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
return nil
}
return FindDuplicatePods(pods)
}

// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
dpm := DuplicatePodsMap{}
dpm := duplicatePodsMap{}
// Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error.
for _, pod := range pods {
ownerRefList := podutil.OwnerRef(pod)
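In the duplicates strategy, the exported DuplicatePodsMap, ListDuplicatePodsOnANode and FindDuplicatePods helpers are folded into the now self-contained RemoveDuplicatePods, and the map type becomes the unexported duplicatePodsMap. The grouping idea itself, keying pods by their creator, can be sketched roughly as below; the key format shown here is illustrative, not the exact key the strategy derives from podutil.OwnerRef:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// duplicatePodsMap mirrors the unexported map type from the strategy:
// a creator key mapped to the pods it owns on a given node.
type duplicatePodsMap map[string][]*v1.Pod

// groupByCreator builds the map from a pod list. The namespace/kind/name key
// used here is a simplification of what the real strategy computes.
func groupByCreator(pods []*v1.Pod) duplicatePodsMap {
	dpm := duplicatePodsMap{}
	for _, pod := range pods {
		for _, owner := range pod.OwnerReferences {
			key := fmt.Sprintf("%s/%s/%s", pod.Namespace, owner.Kind, owner.Name)
			dpm[key] = append(dpm[key], pod)
		}
	}
	return dpm
}

func main() {
	pod := func(name string) *v1.Pod {
		return &v1.Pod{ObjectMeta: metav1.ObjectMeta{
			Name:            name,
			Namespace:       "default",
			OwnerReferences: []metav1.OwnerReference{{Kind: "ReplicaSet", Name: "rs-1"}},
		}}
	}
	dpm := groupByCreator([]*v1.Pod{pod("a"), pod("b")})
	for creator, pods := range dpm {
		if len(pods) > 1 {
			fmt.Printf("%d duplicates for %s\n", len(pods), creator)
		}
	}
}
```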
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
@@ -143,7 +144,7 @@ func TestFindDuplicatePods(t *testing.T) {
[]*v1.Node{node},
)

deleteDuplicatePods(fakeClient, []*v1.Node{node}, false, podEvictor)
RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
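The test no longer calls the unexported deleteDuplicatePods; it hands the fake clientset to RemoveDuplicatePods directly, and the same calling pattern repeats in the other strategy tests in this commit. An illustrative sketch of that pattern (not a test from the repo; the evictions.NewPodEvictor argument order is taken from the calls visible in this diff, not verified against the current API):

```go
package strategies

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// TestStrategyCallPattern is illustrative only: it shows the post-refactor
// calling convention used by the strategy tests in this diff.
func TestStrategyCallPattern(t *testing.T) {
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, &v1.PodList{Items: nil}, nil
	})

	var nodes []*v1.Node

	// Argument order follows the diff: client, eviction policy group/version,
	// dryRun, maxPodsToEvict, nodes.
	podEvictor := evictions.NewPodEvictor(fakeClient, "v1", false, 0, nodes)

	// The strategy now takes the client and evictLocalStoragePods directly.
	RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, nodes, false, podEvictor)

	if evicted := podEvictor.TotalEvicted(); evicted != 0 {
		t.Errorf("expected no evictions, got %d", evicted)
	}
}
```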
@@ -24,7 +24,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -40,7 +39,7 @@ type NodeUsageMap struct {

type NodePodsMap map[*v1.Node][]*v1.Pod

func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
if !strategy.Enabled {
return
}
@@ -60,8 +59,8 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
return
}

npm := createNodePodsMap(ds.Client, nodes)
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
npm := createNodePodsMap(client, nodes)
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)

klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
@@ -95,7 +94,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
targetNodes,
lowNodes,
targetThresholds,
ds.EvictLocalStoragePods,
evictLocalStoragePods,
podEvictor)

klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())
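LowNodeUtilization now receives the client and evictLocalStoragePods directly instead of reading them from the DeschedulerServer; createNodePodsMap and classifyNodes keep their roles. As a rough illustration of the "node under utilization" criteria logged above, a simplified threshold check (percentages only; the real classifyNodes also derives the usage numbers from the node's pods) might look like:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// usage holds per-resource utilization as a percentage of allocatable capacity,
// which is how the strategy logs its thresholds.
type usage map[v1.ResourceName]float64

// underThresholds reports whether every tracked resource is below its
// threshold, the rough shape of the "node under utilization" check.
func underThresholds(u usage, thresholds usage) bool {
	for name, limit := range thresholds {
		if u[name] >= limit {
			return false
		}
	}
	return true
}

func main() {
	thresholds := usage{v1.ResourceCPU: 20, v1.ResourceMemory: 20, v1.ResourcePods: 20}
	nodeUsage := usage{v1.ResourceCPU: 10, v1.ResourceMemory: 35, v1.ResourcePods: 5}

	fmt.Printf("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v\n",
		thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
	fmt.Println("node is under-utilized:", underThresholds(nodeUsage, thresholds))
}
```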
@@ -33,9 +33,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
@@ -623,22 +621,15 @@ func TestWithTaints(t *testing.T) {
return true, nil, nil
})

ds := &options.DeschedulerServer{
Client: &fake.Clientset{Fake: *fakePtr},
DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
EvictLocalStoragePods: false,
},
}

podEvictor := evictions.NewPodEvictor(
&fake.Clientset{Fake: *fakePtr},
"policy/v1",
ds.DryRun,
false,
item.evictionsExpected,
item.nodes,
)

LowNodeUtilization(ds, strategy, item.nodes, podEvictor)
LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)

if item.evictionsExpected != evictionCounter {
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)
@@ -18,24 +18,16 @@ package strategies

import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if !strategy.Enabled {
return
}

removePodsViolatingNodeAffinityCount(ds, strategy, nodes, ds.EvictLocalStoragePods, podEvictor)
}

func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)

@@ -44,7 +36,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)

pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
klog.Errorf("failed to get pods from %v: %v", node.Name, err)
}
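RemovePodsViolatingNodeAffinity absorbs the former removePodsViolatingNodeAffinityCount and takes the client directly; it still iterates strategy.Params.NodeAffinityType and, for requiredDuringSchedulingIgnoredDuringExecution, evicts evictable pods whose required node affinity no longer matches their node. A deliberately simplified sketch of such a match check (only "In" expressions; the real strategy relies on the shared node-affinity helpers) is:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// matchesRequiredNodeAffinity is a simplified check: it only handles "In"
// expressions, whereas the strategy uses full node-affinity matching.
func matchesRequiredNodeAffinity(pod *v1.Pod, node *v1.Node) bool {
	affinity := pod.Spec.Affinity
	if affinity == nil || affinity.NodeAffinity == nil ||
		affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		return true // no required affinity means nothing to violate
	}
	for _, term := range affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
		termMatches := true
		for _, expr := range term.MatchExpressions {
			if expr.Operator != v1.NodeSelectorOpIn {
				continue // other operators omitted in this sketch
			}
			found := false
			for _, val := range expr.Values {
				if node.Labels[expr.Key] == val {
					found = true
				}
			}
			if !found {
				termMatches = false
			}
		}
		if termMatches {
			return true // node selector terms are ORed
		}
	}
	return false
}

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"zone": "b"}}}
	pod := &v1.Pod{Spec: v1.PodSpec{Affinity: &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: []v1.NodeSelectorRequirement{{
				Key: "zone", Operator: v1.NodeSelectorOpIn, Values: []string{"a"},
			}}}},
		},
	}}}}
	fmt.Println("pod still fits node:", matchesRequiredNodeAffinity(pod, node))
}
```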
@@ -23,9 +23,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
@@ -96,21 +94,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
expectedEvictedPodCount int
maxPodsToEvict int
}{
{
description: "Strategy disabled, should not evict any pods",
strategy: api.DeschedulerStrategy{
Enabled: false,
Params: api.StrategyParameters{
NodeAffinityType: []string{
"requiredDuringSchedulingIgnoredDuringExecution",
},
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvict: 0,
},
{
description: "Invalid strategy type, should not evict any pods",
strategy: api.DeschedulerStrategy{
@@ -167,22 +150,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
return true, &v1.PodList{Items: tc.pods}, nil
})

ds := options.DeschedulerServer{
Client: fakeClient,
DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
EvictLocalStoragePods: false,
},
}

podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
ds.DryRun,
false,
tc.maxPodsToEvict,
tc.nodes,
)

RemovePodsViolatingNodeAffinity(&ds, tc.strategy, tc.nodes, podEvictor)
RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -17,7 +17,6 @@ limitations under the License.
package strategies

import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -28,16 +27,8 @@ import (
"k8s.io/klog"
)

// RemovePodsViolatingNodeTaints with elimination strategy
func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if !strategy.Enabled {
return
}
deletePodsViolatingNodeTaints(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
}

// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func deletePodsViolatingNodeTaints(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
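RemovePodsViolatingNodeTaints likewise replaces the deletePodsViolatingNodeTaints wrapper: it lists the evictable pods per node and evicts those that do not tolerate the node's NoSchedule taints. A simplified stand-in for that toleration check (exact key/value/effect comparison only, not the shared toleration helpers the strategy actually uses) could look like:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// toleratesNoScheduleTaints reports whether the pod tolerates every
// NoSchedule taint on the node. Matching here is exact on key, value and
// effect, a simplification of the real toleration semantics.
func toleratesNoScheduleTaints(pod *v1.Pod, node *v1.Node) bool {
	for _, taint := range node.Spec.Taints {
		if taint.Effect != v1.TaintEffectNoSchedule {
			continue
		}
		tolerated := false
		for _, tol := range pod.Spec.Tolerations {
			if tol.Key == taint.Key && tol.Value == taint.Value && tol.Effect == taint.Effect {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return false
		}
	}
	return true
}

func main() {
	node := &v1.Node{Spec: v1.NodeSpec{Taints: []v1.Taint{{
		Key: "dedicated", Value: "infra", Effect: v1.TaintEffectNoSchedule,
	}}}}
	pod := &v1.Pod{} // no tolerations, so it violates the taint
	fmt.Println("pod tolerates node taints:", toleratesNoScheduleTaints(pod, node))
}
```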
@@ -9,6 +9,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
@@ -170,7 +171,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
tc.nodes,
)

deletePodsViolatingNodeTaints(fakeClient, tc.nodes, tc.evictLocalStoragePods, podEvictor)
RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
@@ -17,7 +17,6 @@ limitations under the License.
package strategies

import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -29,16 +28,8 @@ import (
"k8s.io/klog"
)

// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if !strategy.Enabled {
return
}
removePodsWithAffinityRules(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
}

// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
func removePodsWithAffinityRules(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
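RemovePodsViolatingInterPodAntiAffinity takes over from removePodsWithAffinityRules with the same client-first signature. The underlying question, whether a pod's presence violates another pod's required anti-affinity on the node, can be sketched as follows; this ignores the namespace and topology-key handling that the full implementation also has to consider:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// violatesAntiAffinity is a rough sketch: it reports whether the candidate's
// labels match any required anti-affinity term declared by other pods on the
// same node, without the namespace or topology checks of the real strategy.
func violatesAntiAffinity(candidate *v1.Pod, others []*v1.Pod) bool {
	for _, other := range others {
		if other.Spec.Affinity == nil || other.Spec.Affinity.PodAntiAffinity == nil {
			continue
		}
		for _, term := range other.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
			selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
			if err != nil {
				continue
			}
			if selector.Matches(labels.Set(candidate.Labels)) {
				return true
			}
		}
	}
	return false
}

func main() {
	target := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}}}
	other := &v1.Pod{Spec: v1.PodSpec{Affinity: &v1.Affinity{PodAntiAffinity: &v1.PodAntiAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
			LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
			TopologyKey:   "kubernetes.io/hostname",
		}},
	}}}}
	fmt.Println("violates anti-affinity:", violatesAntiAffinity(target, []*v1.Pod{other}))
}
```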
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
@@ -83,7 +84,7 @@ func TestPodAntiAffinity(t *testing.T) {
[]*v1.Node{node},
)

removePodsWithAffinityRules(fakeClient, []*v1.Node{node}, false, podEvictor)
RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
@@ -18,9 +18,9 @@ package strategies

import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -29,17 +29,13 @@ import (
// RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
func RemovePodsHavingTooManyRestarts(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if !strategy.Enabled || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
if strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
return
}
removePodsHavingTooManyRestarts(ds, strategy, nodes, podEvictor, ds.EvictLocalStoragePods)
}

func removePodsHavingTooManyRestarts(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictLocalStoragePods bool) {
for _, node := range nodes {
klog.V(1).Infof("Processing node: %s", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
klog.Errorf("Error when list pods at node %s", node.Name)
continue
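RemovePodsHavingTooManyRestarts keeps the PodRestartThreshold guard but drops the strategy.Enabled check, which now lives in the caller's dispatch loop. The quantity compared against the threshold is the pod's accumulated container restart count; a minimal sketch of that comparison (hypothetical threshold value, simplified counting) is:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// restartCount sums restarts across the pod's containers, the kind of value
// compared against strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold.
func restartCount(pod *v1.Pod) int32 {
	var total int32
	for _, cs := range pod.Status.ContainerStatuses {
		total += cs.RestartCount
	}
	return total
}

func main() {
	pod := &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
		{RestartCount: 7}, {RestartCount: 5},
	}}}
	const podRestartThreshold = 10 // hypothetical threshold value
	if restartCount(pod) >= podRestartThreshold {
		fmt.Println("pod exceeds the restart threshold and is a candidate for eviction")
	}
}
```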
@@ -20,14 +20,13 @@ import (
"testing"

"fmt"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/test"
)
@@ -92,15 +91,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}
}

node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
pods := initPods(node)

fakeClient := &fake.Clientset{}

fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: pods}, nil
})

tests := []struct {
description string
pods []v1.Pod
@@ -165,13 +155,13 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}

for _, tc := range tests {
node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
pods := initPods(node)

ds := options.DeschedulerServer{
DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
MaxNoOfPodsToEvictPerNode: tc.maxPodsToEvict,
},
Client: fakeClient,
}
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: pods}, nil
})

podEvictor := evictions.NewPodEvictor(
fakeClient,
@@ -181,7 +171,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
[]*v1.Node{node},
)

removePodsHavingTooManyRestarts(&ds, tc.strategy, []*v1.Node{node}, podEvictor, ds.EvictLocalStoragePods)
RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)