Mirror of https://github.com/kubernetes-sigs/descheduler.git

Merge pull request #267 from ingvagabund/drop-DeschedulerServer-from-strategies

Drop descheduler server from strategies
Authored by Kubernetes Prow Robot on 2020-04-28 08:34:06 -07:00; committed by GitHub.
14 changed files with 64 additions and 137 deletions
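
The common thread across the changed files: every strategy entry point now takes a plain clientset.Interface, its own api.DeschedulerStrategy, the node list, the evictLocalStoragePods flag, and a *evictions.PodEvictor, instead of the whole *options.DeschedulerServer. The snippet below is not part of the commit; it is a minimal sketch of calling one strategy under the new signature, patterned on the updated unit tests (the fake clientset, empty pod list, test node, and import paths are assumptions inferred from the package names visible in the diff):

// Illustrative only: a standalone sketch, not code from this commit.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
	"sigs.k8s.io/descheduler/test"
)

func main() {
	// Fake clientset that returns an empty pod list, as the updated unit tests do.
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, &v1.PodList{Items: []v1.Pod{}}, nil
	})

	// One synthetic node, built the same way the tests build theirs.
	node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
	nodes := []*v1.Node{node}

	// The evictor now carries what strategies used to read off the DeschedulerServer
	// (client, eviction API group/version, dry-run flag, per-node eviction cap).
	podEvictor := evictions.NewPodEvictor(
		fakeClient,
		"policy/v1", // eviction policy group version
		false,       // dry run
		0,           // max pods to evict per node
		nodes,
	)

	// The new shared signature: client, strategy config, nodes, evictLocalStoragePods, evictor.
	strategies.RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, nodes, false, podEvictor)

	fmt.Println("total evicted:", podEvictor.TotalEvicted())
}

If the sketch is roughly right, the strategyFuncs table added in descheduler.go below is just this call made generic over the six strategy names.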

View File

@@ -19,6 +19,8 @@ package descheduler
 import (
     "fmt"
+    "k8s.io/api/core/v1"
+    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
     "k8s.io/apimachinery/pkg/util/wait"

@@ -56,6 +58,8 @@ func Run(rs *options.DeschedulerServer) error {
     return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }
+type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
 func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
     sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
     nodeInformer := sharedInformerFactory.Core().V1().Nodes()

@@ -63,6 +67,15 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
     sharedInformerFactory.Start(stopChannel)
     sharedInformerFactory.WaitForCacheSync(stopChannel)
+    strategyFuncs := map[string]strategyFunction{
+        "RemoveDuplicates":                        strategies.RemoveDuplicatePods,
+        "LowNodeUtilization":                      strategies.LowNodeUtilization,
+        "RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
+        "RemovePodsViolatingNodeAffinity":         strategies.RemovePodsViolatingNodeAffinity,
+        "RemovePodsViolatingNodeTaints":           strategies.RemovePodsViolatingNodeTaints,
+        "RemovePodsHavingTooManyRestarts":         strategies.RemovePodsHavingTooManyRestarts,
+    }
     wait.Until(func() {
         nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
         if err != nil {

@@ -85,12 +98,11 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
             nodes,
         )
-        strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], nodes, podEvictor)
-        strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], nodes, podEvictor)
-        strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], nodes, podEvictor)
-        strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], nodes, podEvictor)
-        strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], nodes, podEvictor)
-        strategies.RemovePodsHavingTooManyRestarts(rs, deschedulerPolicy.Strategies["RemovePodsHavingTooManyRestarts"], nodes, podEvictor)
+        for name, f := range strategyFuncs {
+            if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
+                f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
+            }
+        }
         // If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
         if rs.DeschedulingInterval.Seconds() == 0 {
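
One behavioral note on the hunk above: the Enabled check now lives in this dispatch loop, so most strategy entry points later in the diff drop their own !strategy.Enabled guards (LowNodeUtilization still keeps its internal check). Below is a stripped-down sketch of the same table-driven dispatch, using hypothetical stand-in types rather than the descheduler API:

// Illustrative sketch with stand-ins for api.DeschedulerStrategy and strategyFunction.
package main

import "fmt"

type strategy struct{ Enabled bool }
type strategyFunc func(s strategy)

func main() {
	// Policy as configured by the user: strategy name -> settings.
	policy := map[string]strategy{
		"RemoveDuplicates":   {Enabled: true},
		"LowNodeUtilization": {Enabled: false},
	}

	// Registry of implementations keyed by the same names.
	funcs := map[string]strategyFunc{
		"RemoveDuplicates":   func(s strategy) { fmt.Println("running RemoveDuplicates") },
		"LowNodeUtilization": func(s strategy) { fmt.Println("running LowNodeUtilization") },
	}

	// Same shape as the new loop in RunDeschedulerStrategies: look up each registered
	// strategy in the policy and call it only when it is enabled there.
	for name, f := range funcs {
		if s, ok := policy[name]; ok && s.Enabled {
			f(s)
		}
	}
}

The map keys double as the lookup keys into deschedulerPolicy.Strategies, which is why the registry names and the policy names have to match exactly.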

View File

@@ -23,35 +23,24 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 )
-//type creator string
-type DuplicatePodsMap map[string][]*v1.Pod
 // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
 // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-    if !strategy.Enabled {
-        return
-    }
-    deleteDuplicatePods(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
-}
-// deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
-func deleteDuplicatePods(
+func RemoveDuplicatePods(
     client clientset.Interface,
+    strategy api.DeschedulerStrategy,
     nodes []*v1.Node,
     evictLocalStoragePods bool,
     podEvictor *evictions.PodEvictor,
 ) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v", node.Name)
-        dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
+        dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
         for creator, pods := range dpm {
             if len(pods) > 1 {
                 klog.V(1).Infof("%#v", creator)

@@ -66,18 +55,17 @@ func deleteDuplicatePods(
     }
 }
-// ListDuplicatePodsOnANode lists duplicate pods on a given node.
-func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) DuplicatePodsMap {
+//type creator string
+type duplicatePodsMap map[string][]*v1.Pod
+// listDuplicatePodsOnANode lists duplicate pods on a given node.
+func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
     pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
     if err != nil {
         return nil
     }
-    return FindDuplicatePods(pods)
-}
-// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
-func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
-    dpm := DuplicatePodsMap{}
+    dpm := duplicatePodsMap{}
     // Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error.
     for _, pod := range pods {
         ownerRefList := podutil.OwnerRef(pod)

View File

@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"

@@ -143,7 +144,7 @@ func TestFindDuplicatePods(t *testing.T) {
         []*v1.Node{node},
     )
-    deleteDuplicatePods(fakeClient, []*v1.Node{node}, false, podEvictor)
+    RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
     podsEvicted := podEvictor.TotalEvicted()
     if podsEvicted != testCase.expectedEvictedPodCount {
         t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)

View File

@@ -24,7 +24,6 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"

@@ -40,7 +39,7 @@ type NodeUsageMap struct {
 type NodePodsMap map[*v1.Node][]*v1.Pod
-func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
+func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     if !strategy.Enabled {
         return
     }

@@ -60,8 +59,8 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
         return
     }
-    npm := createNodePodsMap(ds.Client, nodes)
-    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
+    npm := createNodePodsMap(client, nodes)
+    lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)
     klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
         thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])

@@ -95,7 +94,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
         targetNodes,
         lowNodes,
         targetThresholds,
-        ds.EvictLocalStoragePods,
+        evictLocalStoragePods,
         podEvictor)
     klog.V(1).Infof("Total number of pods evicted: %v", podEvictor.TotalEvicted())

View File

@@ -33,9 +33,7 @@ import (
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
-    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"

@@ -623,22 +621,15 @@ func TestWithTaints(t *testing.T) {
             return true, nil, nil
         })
-        ds := &options.DeschedulerServer{
-            Client: &fake.Clientset{Fake: *fakePtr},
-            DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
-                EvictLocalStoragePods: false,
-            },
-        }
         podEvictor := evictions.NewPodEvictor(
             &fake.Clientset{Fake: *fakePtr},
             "policy/v1",
-            ds.DryRun,
+            false,
             item.evictionsExpected,
             item.nodes,
         )
-        LowNodeUtilization(ds, strategy, item.nodes, podEvictor)
+        LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
         if item.evictionsExpected != evictionCounter {
             t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)

View File

@@ -18,24 +18,16 @@ package strategies
 import (
     "k8s.io/api/core/v1"
+    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 )
-func RemovePodsViolatingNodeAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-    if !strategy.Enabled {
-        return
-    }
-    removePodsViolatingNodeAffinityCount(ds, strategy, nodes, ds.EvictLocalStoragePods, podEvictor)
-}
-func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     for _, nodeAffinity := range strategy.Params.NodeAffinityType {
         klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)

@@ -44,7 +36,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
         for _, node := range nodes {
             klog.V(1).Infof("Processing node: %#v\n", node.Name)
-            pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
+            pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
             if err != nil {
                 klog.Errorf("failed to get pods from %v: %v", node.Name, err)
             }

View File

@@ -23,9 +23,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
-    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/test"
 )

@@ -96,21 +94,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
         expectedEvictedPodCount int
         maxPodsToEvict          int
     }{
-        {
-            description: "Strategy disabled, should not evict any pods",
-            strategy: api.DeschedulerStrategy{
-                Enabled: false,
-                Params: api.StrategyParameters{
-                    NodeAffinityType: []string{
-                        "requiredDuringSchedulingIgnoredDuringExecution",
-                    },
-                },
-            },
-            expectedEvictedPodCount: 0,
-            pods: addPodsToNode(nodeWithoutLabels),
-            nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
-            maxPodsToEvict: 0,
-        },
         {
             description: "Invalid strategy type, should not evict any pods",
             strategy: api.DeschedulerStrategy{

@@ -167,22 +150,15 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             return true, &v1.PodList{Items: tc.pods}, nil
         })
-        ds := options.DeschedulerServer{
-            Client: fakeClient,
-            DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
-                EvictLocalStoragePods: false,
-            },
-        }
         podEvictor := evictions.NewPodEvictor(
             fakeClient,
             "v1",
-            ds.DryRun,
+            false,
             tc.maxPodsToEvict,
             tc.nodes,
         )
-        RemovePodsViolatingNodeAffinity(&ds, tc.strategy, tc.nodes, podEvictor)
+        RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor)
         actualEvictedPodCount := podEvictor.TotalEvicted()
         if actualEvictedPodCount != tc.expectedEvictedPodCount {
             t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)

View File

@@ -17,7 +17,6 @@ limitations under the License.
 package strategies
 import (
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

@@ -28,16 +27,8 @@ import (
     "k8s.io/klog"
 )
-// RemovePodsViolatingNodeTaints with elimination strategy
-func RemovePodsViolatingNodeTaints(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-    if !strategy.Enabled {
-        return
-    }
-    deletePodsViolatingNodeTaints(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
-}
-// deletePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func deletePodsViolatingNodeTaints(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+// RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
+func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
         pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)

View File

@@ -9,6 +9,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/pkg/utils"
     "sigs.k8s.io/descheduler/test"

@@ -170,7 +171,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             tc.nodes,
         )
-        deletePodsViolatingNodeTaints(fakeClient, tc.nodes, tc.evictLocalStoragePods, podEvictor)
+        RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
         actualEvictedPodCount := podEvictor.TotalEvicted()
         if actualEvictedPodCount != tc.expectedEvictedPodCount {
             t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)

View File

@@ -17,7 +17,6 @@ limitations under the License.
 package strategies
 import (
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

@@ -29,16 +28,8 @@ import (
     "k8s.io/klog"
 )
-// RemovePodsViolatingInterPodAntiAffinity with elimination strategy
-func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-    if !strategy.Enabled {
-        return
-    }
-    removePodsWithAffinityRules(ds.Client, nodes, ds.EvictLocalStoragePods, podEvictor)
-}
-// removePodsWithAffinityRules evicts pods on the node which are having a pod affinity rules.
-func removePodsWithAffinityRules(client clientset.Interface, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
+func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
         pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)

View File

@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
+    "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/test"
 )

@@ -83,7 +84,7 @@ func TestPodAntiAffinity(t *testing.T) {
         []*v1.Node{node},
     )
-    removePodsWithAffinityRules(fakeClient, []*v1.Node{node}, false, podEvictor)
+    RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
     podsEvicted := podEvictor.TotalEvicted()
     if podsEvicted != test.expectedEvictedPodCount {
         t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)

View File

@@ -18,9 +18,9 @@ package strategies
 import (
     "k8s.io/api/core/v1"
+    clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

@@ -29,17 +29,13 @@
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
-    if !strategy.Enabled || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
+func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+    if strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
         return
     }
-    removePodsHavingTooManyRestarts(ds, strategy, nodes, podEvictor, ds.EvictLocalStoragePods)
-}
-func removePodsHavingTooManyRestarts(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, evictLocalStoragePods bool) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %s", node.Name)
-        pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
+        pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
         if err != nil {
             klog.Errorf("Error when list pods at node %s", node.Name)
             continue

View File

@@ -20,14 +20,13 @@ import (
     "testing"
     "fmt"
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/kubernetes/fake"
     core "k8s.io/client-go/testing"
-    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/api"
-    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     "sigs.k8s.io/descheduler/test"
 )

@@ -92,15 +91,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
         }
     }
-    node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
-    pods := initPods(node)
-    fakeClient := &fake.Clientset{}
-    fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
-        return true, &v1.PodList{Items: pods}, nil
-    })
     tests := []struct {
         description string
         pods        []v1.Pod

@@ -165,13 +155,13 @@
     }
     for _, tc := range tests {
-        ds := options.DeschedulerServer{
-            DeschedulerConfiguration: componentconfig.DeschedulerConfiguration{
-                MaxNoOfPodsToEvictPerNode: tc.maxPodsToEvict,
-            },
-            Client: fakeClient,
-        }
+        node := test.BuildTestNode("node1", 2000, 3000, 10, nil)
+        pods := initPods(node)
+        fakeClient := &fake.Clientset{}
+        fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
+            return true, &v1.PodList{Items: pods}, nil
+        })
         podEvictor := evictions.NewPodEvictor(
             fakeClient,

@@ -181,7 +171,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
             []*v1.Node{node},
         )
-        removePodsHavingTooManyRestarts(&ds, tc.strategy, []*v1.Node{node}, podEvictor, ds.EvictLocalStoragePods)
+        RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
         actualEvictedPodCount := podEvictor.TotalEvicted()
         if actualEvictedPodCount != tc.expectedEvictedPodCount {
             t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)

View File

@@ -125,17 +125,15 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor
         },
     }
-    ds := &options.DeschedulerServer{Client: clientset}
     podEvictor := evictions.NewPodEvictor(
-        ds.Client,
+        clientset,
         evictionPolicyGroupVersion,
-        ds.DryRun,
-        ds.MaxNoOfPodsToEvictPerNode,
+        false,
+        0,
         nodes,
     )
-    strategies.LowNodeUtilization(ds, lowNodeUtilizationStrategy, nodes, podEvictor)
+    strategies.LowNodeUtilization(clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
     time.Sleep(10 * time.Second)
 }