Mirror of https://github.com/kubernetes-sigs/descheduler.git
React to 1.18 by adding contexts to client calls
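Background: Kubernetes 1.18 (client-go v0.18) changed every generated clientset method to take a context.Context as its first argument, and gave mutating verbs an explicit options struct. A minimal runnable sketch of that signature change, using a fake clientset so it is self-contained (the names here are illustrative, not part of this commit):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// client-go < v0.18:
	//   pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
	// client-go >= v0.18: context first; mutating verbs also gain an options
	// argument (hence the metav1.UpdateOptions{} added to Update in this diff).
	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pods.Items), "pods")
}

Rather than dropping a context.TODO() at each call site, this commit threads a single context.Background() from the descheduler entry point down to every client call.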
@@ -17,6 +17,7 @@ limitations under the License.
 package descheduler
 
 import (
+	"context"
 	"fmt"

 	"k8s.io/api/core/v1"
@@ -35,6 +36,7 @@ import (
 )
 
 func Run(rs *options.DeschedulerServer) error {
+	ctx := context.Background()
 	rsclient, err := client.CreateClient(rs.KubeconfigFile)
 	if err != nil {
 		return err
@@ -55,12 +57,12 @@ func Run(rs *options.DeschedulerServer) error {
 	}
 
 	stopChannel := make(chan struct{})
-	return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
+	return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }
 
-type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
 
-func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
+func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
 	sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
 	nodeInformer := sharedInformerFactory.Core().V1().Nodes()
 
@@ -78,7 +80,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
 	}
 
 	wait.Until(func() {
-		nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
+		nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
 		if err != nil {
 			klog.V(1).Infof("Unable to get ready nodes: %v", err)
 			close(stopChannel)
@@ -101,7 +103,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
 
 		for name, f := range strategyFuncs {
 			if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
-				f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
+				f(ctx, rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
			}
		}
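The hunks above establish the plumbing pattern repeated in every file below: Run creates one root context, the strategyFunction type and RunDeschedulerStrategies grow a leading ctx parameter, and ctx flows unchanged into the node and pod helpers. A self-contained sketch of the same pattern, with hypothetical names and a fake clientset:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// run mirrors Run above: it creates the single root context for the process.
func run(client clientset.Interface) error {
	ctx := context.Background()
	return listNodes(ctx, client)
}

// listNodes mirrors the helpers in this commit: ctx is always the first
// parameter and is handed straight to the client call.
func listNodes(ctx context.Context, client clientset.Interface) error {
	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d nodes\n", len(nodes.Items))
	return nil
}

func main() {
	if err := run(fake.NewSimpleClientset()); err != nil {
		panic(err)
	}
}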
@@ -1,6 +1,7 @@
 package descheduler
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -16,6 +17,7 @@ import (
 )
 
 func TestTaintsUpdated(t *testing.T) {
+	ctx := context.Background()
 	n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
 
@@ -40,7 +42,7 @@ func TestTaintsUpdated(t *testing.T) {
 	rs.Client = client
 	rs.DeschedulingInterval = 100 * time.Millisecond
 	go func() {
-		err := RunDeschedulerStrategies(rs, dp, "v1beta1", stopChannel)
+		err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
 		if err != nil {
 			t.Fatalf("Unable to run descheduler strategies: %v", err)
 		}
@@ -48,7 +50,7 @@ func TestTaintsUpdated(t *testing.T) {
 
 	// Wait for few cycles and then verify the only pod still exists
 	time.Sleep(300 * time.Millisecond)
-	pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
+	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("Unable to list pods: %v", err)
 	}
@@ -65,7 +67,7 @@ func TestTaintsUpdated(t *testing.T) {
 		},
 	}
 
-	if _, err := client.CoreV1().Nodes().Update(n1WithTaint); err != nil {
+	if _, err := client.CoreV1().Nodes().Update(ctx, n1WithTaint, metav1.UpdateOptions{}); err != nil {
 		t.Fatalf("Unable to update node: %v\n", err)
 	}
 
@@ -74,7 +76,7 @@ func TestTaintsUpdated(t *testing.T) {
 		//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
 		// List is better, it does not panic.
 		// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
-		pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
+		pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
 		if err == nil {
 			if len(pods.Items) > 0 {
 				return false, nil
@@ -17,6 +17,7 @@ limitations under the License.
 package evictions
 
 import (
+	"context"
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
@@ -82,12 +83,12 @@ func (pe *PodEvictor) TotalEvicted() int {
 // EvictPod returns non-nil error only when evicting a pod on a node is not
 // possible (due to maxPodsToEvict constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err error) {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (success bool, err error) {
 	if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
 		return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
 	}
 
-	success, err = EvictPod(pe.client, pod, pe.policyGroupVersion, pe.dryRun)
+	success, err = EvictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
 	if success {
 		pe.nodepodCount[node]++
 		klog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
@@ -98,7 +99,7 @@ func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err er
 	return false, nil
 }
 
-func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
+func EvictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
 	if dryRun {
 		return true, nil
 	}
@@ -115,7 +116,7 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
 		},
 		DeleteOptions: deleteOptions,
 	}
-	err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
+	err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
 
 	if err == nil {
 		eventBroadcaster := record.NewBroadcaster()
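A practical consequence of the new EvictPod signatures above: callers can now bound or cancel the eviction API call. A runnable sketch with a hypothetical 30-second deadline and a fake clientset standing in for a real one:

package main

import (
	"context"
	"fmt"
	"time"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// Bound the API call: if the server does not answer within 30 seconds
	// the request is cancelled instead of stalling the descheduling loop.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Mirrors the eviction object built in EvictPod above.
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
	}
	err := client.PolicyV1beta1().Evictions("default").Evict(ctx, eviction)
	fmt.Println("eviction error:", err)
}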
@@ -17,6 +17,7 @@ limitations under the License.
 package evictions
 
 import (
+	"context"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -27,6 +28,7 @@ import (
 )
 
 func TestEvictPod(t *testing.T) {
+	ctx := context.Background()
 	node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
 	pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
 	tests := []struct {
@@ -57,7 +59,7 @@ func TestEvictPod(t *testing.T) {
 		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
 			return true, &v1.PodList{Items: test.pods}, nil
 		})
-		got, _ := EvictPod(fakeClient, test.pod, "v1", false)
+		got, _ := EvictPod(ctx, fakeClient, test.pod, "v1", false)
 		if got != test.want {
 			t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
 		}
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+	"context"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -28,7 +29,7 @@ import (
 
 // ReadyNodes returns ready nodes irrespective of whether they are
 // schedulable or not.
-func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
+func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
 	ns, err := labels.Parse(nodeSelector)
 	if err != nil {
 		return []*v1.Node{}, err
@@ -43,7 +44,7 @@ func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInfor
 	if len(nodes) == 0 {
 		klog.V(2).Infof("node lister returned empty list, now fetch directly")
 
-		nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
+		nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
 		if err != nil {
 			return []*v1.Node{}, err
 		}
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+	"context"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -56,6 +57,7 @@ func TestReadyNodes(t *testing.T) {
 }
 
 func TestReadyNodesWithNodeSelector(t *testing.T) {
+	ctx := context.Background()
 	node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
 	node1.Labels = map[string]string{"type": "compute"}
 	node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
@@ -72,7 +74,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
 	sharedInformerFactory.WaitForCacheSync(stopChannel)
 	defer close(stopChannel)
 
-	nodes, _ := ReadyNodes(fakeClient, nodeInformer, nodeSelector, nil)
+	nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector, nil)
 
 	if nodes[0].Name != "node1" {
 		t.Errorf("Expected node1, got %s", nodes[0].Name)
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+	"context"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -38,8 +39,8 @@ func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
 }
 
 // ListEvictablePodsOnNode returns the list of evictable pods on node.
-func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
-	pods, err := ListPodsOnANode(client, node)
+func ListEvictablePodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
+	pods, err := ListPodsOnANode(ctx, client, node)
 	if err != nil {
 		return []*v1.Pod{}, err
 	}
@@ -54,13 +55,13 @@ func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLoc
 	return evictablePods, nil
 }
 
-func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
 	fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
 	if err != nil {
 		return []*v1.Pod{}, err
 	}
 
-	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
+	podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
 		metav1.ListOptions{FieldSelector: fieldSelector.String()})
 	if err != nil {
 		return []*v1.Pod{}, err
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"strings"
 
 	"k8s.io/api/core/v1"
@@ -32,6 +33,7 @@ import (
 // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 func RemoveDuplicatePods(
+	ctx context.Context,
 	client clientset.Interface,
 	strategy api.DeschedulerStrategy,
 	nodes []*v1.Node,
@@ -40,13 +42,13 @@ func RemoveDuplicatePods(
 ) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v", node.Name)
-		dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
+		dpm := listDuplicatePodsOnANode(ctx, client, node, evictLocalStoragePods)
 		for creator, pods := range dpm {
 			if len(pods) > 1 {
 				klog.V(1).Infof("%#v", creator)
 				// i = 0 does not evict the first pod
 				for i := 1; i < len(pods); i++ {
-					if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+					if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
						break
					}
				}
@@ -59,8 +61,8 @@ func RemoveDuplicatePods(
 type duplicatePodsMap map[string][]*v1.Pod
 
 // listDuplicatePodsOnANode lists duplicate pods on a given node.
-func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
-	pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
+	pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
 	if err != nil {
 		return nil
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -31,6 +32,7 @@ import (
 )
 
 func TestFindDuplicatePods(t *testing.T) {
+	ctx := context.Background()
 	// first setup pods
 	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
@@ -144,7 +146,7 @@ func TestFindDuplicatePods(t *testing.T) {
 			[]*v1.Node{node},
 		)
 
-		RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
+		RemoveDuplicatePods(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
 		podsEvicted := podEvictor.TotalEvicted()
 		if podsEvicted != testCase.expectedEvictedPodCount {
 			t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"sort"
 
 	v1 "k8s.io/api/core/v1"
@@ -39,7 +40,7 @@ type NodeUsageMap struct {
 
 type NodePodsMap map[*v1.Node][]*v1.Pod
 
-func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	if !strategy.Enabled {
 		return
 	}
@@ -59,7 +60,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra
 		return
 	}
 
-	npm := createNodePodsMap(client, nodes)
+	npm := createNodePodsMap(ctx, client, nodes)
 	lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)
 
 	klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
@@ -91,6 +92,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra
 	klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
 
 	evictPodsFromTargetNodes(
+		ctx,
 		targetNodes,
 		lowNodes,
 		targetThresholds,
@@ -162,6 +164,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
 // evicts them based on QoS as fallback option.
 // TODO: @ravig Break this function into smaller functions.
 func evictPodsFromTargetNodes(
+	ctx context.Context,
 	targetNodes, lowNodes []NodeUsageMap,
 	targetThresholds api.ResourceThresholds,
 	evictLocalStoragePods bool,
@@ -217,24 +220,26 @@ func evictPodsFromTargetNodes(
 
 			// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
 			sortPodsBasedOnPriority(evictablePods)
-			evictPods(evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+			evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 		} else {
 			// TODO: Remove this when we support only priority.
 			// Falling back to evicting pods based on priority.
 			klog.V(1).Infof("Evicting pods based on QoS")
 			klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods))
 			// evict best effort pods
-			evictPods(bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+			evictPods(ctx, bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 			// evict burstable pods
-			evictPods(burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+			evictPods(ctx, burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 			// evict guaranteed pods
-			evictPods(guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+			evictPods(ctx, guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
 		}
 		klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
 	}
 }
 
-func evictPods(inputPods []*v1.Pod,
+func evictPods(
+	ctx context.Context,
+	inputPods []*v1.Pod,
 	targetThresholds api.ResourceThresholds,
 	nodeCapacity v1.ResourceList,
 	nodeUsage api.ResourceThresholds,
@@ -255,7 +260,7 @@ func evictPods(inputPods []*v1.Pod,
 		cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
 		mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
 
-		success, err := podEvictor.EvictPod(pod, node)
+		success, err := podEvictor.EvictPod(ctx, pod, node)
 		if err != nil {
 			break
 		}
@@ -325,10 +330,10 @@ func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
 }
 
 // createNodePodsMap returns nodepodsmap with evictable pods on node.
-func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
+func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
 	npm := NodePodsMap{}
 	for _, node := range nodes {
-		pods, err := podutil.ListPodsOnANode(client, node)
+		pods, err := podutil.ListPodsOnANode(ctx, client, node)
 		if err != nil {
 			klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
 		} else {
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"testing"
@@ -69,6 +70,7 @@ func makeGuaranteedPod(pod *v1.Pod) {
 }
 
 func TestLowNodeUtilization(t *testing.T) {
+	ctx := context.Background()
 	n1NodeName := "n1"
 	n2NodeName := "n2"
 	n3NodeName := "n3"
@@ -344,7 +346,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes = append(nodes, node)
 		}
 
-		npm := createNodePodsMap(fakeClient, nodes)
+		npm := createNodePodsMap(ctx, fakeClient, nodes)
 		lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
 		if len(lowNodes) != 1 {
 			t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
@@ -357,7 +359,7 @@ func TestLowNodeUtilization(t *testing.T) {
 			nodes,
 		)
 
-		evictPodsFromTargetNodes(targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
+		evictPodsFromTargetNodes(ctx, targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
 		podsEvicted := podEvictor.TotalEvicted()
 		if test.expectedPodsEvicted != podsEvicted {
 			t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
@@ -504,6 +506,7 @@ func newFake(objects ...runtime.Object) *core.Fake {
 }
 
 func TestWithTaints(t *testing.T) {
+	ctx := context.Background()
 	strategy := api.DeschedulerStrategy{
 		Enabled: true,
 		Params: api.StrategyParameters{
@@ -629,7 +632,7 @@ func TestWithTaints(t *testing.T) {
 			item.nodes,
 		)
 
-		LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
+		LowNodeUtilization(ctx, &fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
 
 		if item.evictionsExpected != evictionCounter {
 			t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
@@ -27,7 +28,7 @@ import (
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 )
 
-func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	for _, nodeAffinity := range strategy.Params.NodeAffinityType {
 		klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
 
@@ -36,7 +37,7 @@ func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.De
 			for _, node := range nodes {
 				klog.V(1).Infof("Processing node: %#v\n", node.Name)
 
-				pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+				pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
 				if err != nil {
 					klog.Errorf("failed to get pods from %v: %v", node.Name, err)
 				}
@@ -45,7 +46,7 @@ func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.De
 					if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
 						if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
 							klog.V(1).Infof("Evicting pod: %v", pod.Name)
-							if _, err := podEvictor.EvictPod(pod, node); err != nil {
+							if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
								break
							}
						}
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -29,7 +30,7 @@ import (
 )
 
 func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
-
+	ctx := context.Background()
 	requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
 		Enabled: true,
 		Params: api.StrategyParameters{
@@ -158,7 +159,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 			tc.nodes,
 		)
 
-		RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor)
+		RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, false, podEvictor)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -28,10 +29,10 @@ import (
 )
 
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
 		if err != nil {
 			//no pods evicted as error encountered retrieving evictable Pods
 			return
@@ -44,7 +45,7 @@ func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.Desc
 				func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
 			) {
 				klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
-				if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+				if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
					break
				}
			}
@@ -1,6 +1,7 @@
 package strategies
 
 import (
+	"context"
 	"fmt"
 	"testing"
 
@@ -43,7 +44,7 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
 }
 
 func TestDeletePodsViolatingNodeTaints(t *testing.T) {
-
+	ctx := context.Background()
 	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
 	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -171,7 +172,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 			tc.nodes,
 		)
 
-		RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
+		RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -29,17 +30,17 @@ import (
 )
 
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v\n", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
 		if err != nil {
 			return
 		}
 		totalPods := len(pods)
 		for i := 0; i < totalPods; i++ {
 			if checkPodsWithAntiAffinityExist(pods[i], pods) {
-				success, err := podEvictor.EvictPod(pods[i], node)
+				success, err := podEvictor.EvictPod(ctx, pods[i], node)
 				if err != nil {
 					break
 				}
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"testing"
 
 	"k8s.io/api/core/v1"
@@ -30,6 +31,7 @@ import (
 )
 
 func TestPodAntiAffinity(t *testing.T) {
+	ctx := context.Background()
 	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
 	p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
@@ -84,7 +86,7 @@ func TestPodAntiAffinity(t *testing.T) {
 		[]*v1.Node{node},
 	)
 
-	RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
+	RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
 	podsEvicted := podEvictor.TotalEvicted()
 	if podsEvicted != test.expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	v1 "k8s.io/api/core/v1"
 	v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -28,7 +29,7 @@ import (
 )
 
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
-func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	if strategy.Params.MaxPodLifeTimeSeconds == nil {
 		klog.V(1).Infof("MaxPodLifeTimeSeconds not set")
 		return
@@ -36,9 +37,9 @@ func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, n
 
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %#v", node.Name)
-		pods := listOldPodsOnNode(client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
+		pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
 		for _, pod := range pods {
-			success, err := podEvictor.EvictPod(pod, node)
+			success, err := podEvictor.EvictPod(ctx, pod, node)
 			if success {
 				klog.V(1).Infof("Evicted pod: %#v\n because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
 			}
@@ -51,8 +52,8 @@ func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, n
 	}
 }
 
-func listOldPodsOnNode(client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
-	pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
+	pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
 	if err != nil {
 		return nil
 	}
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"testing"
 	"time"
 
@@ -31,6 +32,7 @@ import (
 )
 
 func TestPodLifeTime(t *testing.T) {
+	ctx := context.Background()
 	node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
 	olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
 	newerPodCreationTime := metav1.NewTime(time.Now())
@@ -157,7 +159,7 @@ func TestPodLifeTime(t *testing.T) {
 		[]*v1.Node{node},
 	)
 
-	PodLifeTime(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
+	PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
 	podsEvicted := podEvictor.TotalEvicted()
 	if podsEvicted != tc.expectedEvictedPodCount {
 		t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"k8s.io/api/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
@@ -29,14 +30,14 @@ import (
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
 	if strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
 		klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set")
 		return
 	}
 	for _, node := range nodes {
 		klog.V(1).Infof("Processing node: %s", node.Name)
-		pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+		pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
 		if err != nil {
 			klog.Errorf("Error when list pods at node %s", node.Name)
 			continue
@@ -51,7 +52,7 @@ func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.De
 			} else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
 				continue
 			}
-			if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+			if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
				break
			}
		}
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+	"context"
 	"testing"
 
 	"fmt"
@@ -79,6 +80,7 @@ func initPods(node *v1.Node) []v1.Pod {
 }
 
 func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
+	ctx := context.Background()
 	createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
 		return api.DeschedulerStrategy{
 			Enabled: enabled,
@@ -171,7 +173,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 			[]*v1.Node{node},
 		)
 
-		RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
+		RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
 		actualEvictedPodCount := podEvictor.TotalEvicted()
 		if actualEvictedPodCount != tc.expectedEvictedPodCount {
 			t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
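Note that the commit keeps the existing stopChannel-based shutdown alongside the new context, and nothing cancels ctx yet. A follow-up could bridge the two so that closing the channel also aborts in-flight client calls; a hypothetical helper (not part of this commit):

package descheduler

import "context"

// contextForChannel is a hypothetical helper: it cancels the returned
// context when stopChannel closes, so a close of the channel would also
// abort any in-flight API requests made with ctx.
func contextForChannel(parent context.Context, stopChannel <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	go func() {
		select {
		case <-stopChannel:
			cancel()
		case <-ctx.Done():
		}
	}()
	return ctx, cancel
}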