diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index 748fdfe9e..e6c83f862 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -17,6 +17,7 @@ limitations under the License. package descheduler import ( + "context" "fmt" "k8s.io/api/core/v1" @@ -35,6 +36,7 @@ import ( ) func Run(rs *options.DeschedulerServer) error { + ctx := context.Background() rsclient, err := client.CreateClient(rs.KubeconfigFile) if err != nil { return err @@ -55,12 +57,12 @@ func Run(rs *options.DeschedulerServer) error { } stopChannel := make(chan struct{}) - return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel) + return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel) } -type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) +type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) -func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error { +func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error { sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0) nodeInformer := sharedInformerFactory.Core().V1().Nodes() @@ -78,7 +80,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy * } wait.Until(func() { - nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel) + nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel) if err != nil { klog.V(1).Infof("Unable to get ready nodes: %v", err) close(stopChannel) @@ -101,7 +103,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy * for name, f := range strategyFuncs { if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled { - f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor) + f(ctx, rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor) } } diff --git a/pkg/descheduler/descheduler_test.go b/pkg/descheduler/descheduler_test.go index 883f8f0b5..6b8fc3b49 100644 --- a/pkg/descheduler/descheduler_test.go +++ b/pkg/descheduler/descheduler_test.go @@ -1,6 +1,7 @@ package descheduler import ( + "context" "fmt" "strings" "testing" @@ -16,6 +17,7 @@ import ( ) func TestTaintsUpdated(t *testing.T) { + ctx := context.Background() n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil) @@ -40,7 +42,7 @@ func TestTaintsUpdated(t *testing.T) { rs.Client = client rs.DeschedulingInterval = 100 * time.Millisecond go func() { - err := RunDeschedulerStrategies(rs, dp, "v1beta1", stopChannel) + err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel) if err != nil { t.Fatalf("Unable to run descheduler strategies: %v", err) } @@ -48,7 +50,7 @@ func TestTaintsUpdated(t *testing.T) { // Wait for few cycles and then verify the only pod still exists time.Sleep(300 * time.Millisecond) - pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{}) + pods, err := 
client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Unable to list pods: %v", err) } @@ -65,7 +67,7 @@ func TestTaintsUpdated(t *testing.T) { }, } - if _, err := client.CoreV1().Nodes().Update(n1WithTaint); err != nil { + if _, err := client.CoreV1().Nodes().Update(ctx, n1WithTaint, metav1.UpdateOptions{}); err != nil { t.Fatalf("Unable to update node: %v\n", err) } @@ -74,7 +76,7 @@ func TestTaintsUpdated(t *testing.T) { //pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{}) // List is better, it does not panic. // Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod" - pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{}) + pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{}) if err == nil { if len(pods.Items) > 0 { return false, nil diff --git a/pkg/descheduler/evictions/evictions.go b/pkg/descheduler/evictions/evictions.go index 554396db4..2e400fac6 100644 --- a/pkg/descheduler/evictions/evictions.go +++ b/pkg/descheduler/evictions/evictions.go @@ -17,6 +17,7 @@ limitations under the License. package evictions import ( + "context" "fmt" v1 "k8s.io/api/core/v1" @@ -82,12 +83,12 @@ func (pe *PodEvictor) TotalEvicted() int { // EvictPod returns non-nil error only when evicting a pod on a node is not // possible (due to maxPodsToEvict constraint). Success is true when the pod // is evicted on the server side. -func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err error) { +func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (success bool, err error) { if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict { return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name) } - success, err = EvictPod(pe.client, pod, pe.policyGroupVersion, pe.dryRun) + success, err = EvictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun) if success { pe.nodepodCount[node]++ klog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err) @@ -98,7 +99,7 @@ func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err er return false, nil } -func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) { +func EvictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) { if dryRun { return true, nil } @@ -115,7 +116,7 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string }, DeleteOptions: deleteOptions, } - err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction) + err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction) if err == nil { eventBroadcaster := record.NewBroadcaster() diff --git a/pkg/descheduler/evictions/evictions_test.go b/pkg/descheduler/evictions/evictions_test.go index a8624b3c7..a1e87f8fe 100644 --- a/pkg/descheduler/evictions/evictions_test.go +++ b/pkg/descheduler/evictions/evictions_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package evictions import ( + "context" "testing" "k8s.io/api/core/v1" @@ -27,6 +28,7 @@ import ( ) func TestEvictPod(t *testing.T) { + ctx := context.Background() node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil) pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil) tests := []struct { @@ -57,7 +59,7 @@ func TestEvictPod(t *testing.T) { fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) { return true, &v1.PodList{Items: test.pods}, nil }) - got, _ := EvictPod(fakeClient, test.pod, "v1", false) + got, _ := EvictPod(ctx, fakeClient, test.pod, "v1", false) if got != test.want { t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got) } diff --git a/pkg/descheduler/node/node.go b/pkg/descheduler/node/node.go index 3fd6125eb..b16924c5c 100644 --- a/pkg/descheduler/node/node.go +++ b/pkg/descheduler/node/node.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -28,7 +29,7 @@ import ( // ReadyNodes returns ready nodes irrespective of whether they are // schedulable or not. -func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) { +func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) { ns, err := labels.Parse(nodeSelector) if err != nil { return []*v1.Node{}, err @@ -43,7 +44,7 @@ func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInfor if len(nodes) == 0 { klog.V(2).Infof("node lister returned empty list, now fetch directly") - nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector}) + nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector}) if err != nil { return []*v1.Node{}, err } diff --git a/pkg/descheduler/node/node_test.go b/pkg/descheduler/node/node_test.go index 1e3ea9cf8..95af44ad9 100644 --- a/pkg/descheduler/node/node_test.go +++ b/pkg/descheduler/node/node_test.go @@ -17,6 +17,7 @@ limitations under the License. package node import ( + "context" "testing" "k8s.io/api/core/v1" @@ -56,6 +57,7 @@ func TestReadyNodes(t *testing.T) { } func TestReadyNodesWithNodeSelector(t *testing.T) { + ctx := context.Background() node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil) node1.Labels = map[string]string{"type": "compute"} node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil) @@ -72,7 +74,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) { sharedInformerFactory.WaitForCacheSync(stopChannel) defer close(stopChannel) - nodes, _ := ReadyNodes(fakeClient, nodeInformer, nodeSelector, nil) + nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector, nil) if nodes[0].Name != "node1" { t.Errorf("Expected node1, got %s", nodes[0].Name) diff --git a/pkg/descheduler/pod/pods.go b/pkg/descheduler/pod/pods.go index 37a96aa7e..4978e6215 100644 --- a/pkg/descheduler/pod/pods.go +++ b/pkg/descheduler/pod/pods.go @@ -17,6 +17,7 @@ limitations under the License. 
package pod import ( + "context" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" @@ -38,8 +39,8 @@ func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool { } // ListEvictablePodsOnNode returns the list of evictable pods on node. -func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) { - pods, err := ListPodsOnANode(client, node) +func ListEvictablePodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) { + pods, err := ListPodsOnANode(ctx, client, node) if err != nil { return []*v1.Pod{}, err } @@ -54,13 +55,13 @@ func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLoc return evictablePods, nil } -func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) { +func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) { fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) if err != nil { return []*v1.Pod{}, err } - podList, err := client.CoreV1().Pods(v1.NamespaceAll).List( + podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx, metav1.ListOptions{FieldSelector: fieldSelector.String()}) if err != nil { return []*v1.Pod{}, err diff --git a/pkg/descheduler/strategies/duplicates.go b/pkg/descheduler/strategies/duplicates.go index 885724dc1..2bc6541ac 100644 --- a/pkg/descheduler/strategies/duplicates.go +++ b/pkg/descheduler/strategies/duplicates.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "strings" "k8s.io/api/core/v1" @@ -32,6 +33,7 @@ import ( // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. func RemoveDuplicatePods( + ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, @@ -40,13 +42,13 @@ func RemoveDuplicatePods( ) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v", node.Name) - dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods) + dpm := listDuplicatePodsOnANode(ctx, client, node, evictLocalStoragePods) for creator, pods := range dpm { if len(pods) > 1 { klog.V(1).Infof("%#v", creator) // i = 0 does not evict the first pod for i := 1; i < len(pods); i++ { - if _, err := podEvictor.EvictPod(pods[i], node); err != nil { + if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil { break } } @@ -59,8 +61,8 @@ func RemoveDuplicatePods( type duplicatePodsMap map[string][]*v1.Pod // listDuplicatePodsOnANode lists duplicate pods on a given node. 
-func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap { - pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) +func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap { + pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods) if err != nil { return nil } diff --git a/pkg/descheduler/strategies/duplicates_test.go b/pkg/descheduler/strategies/duplicates_test.go index d2ccd92ee..2d781d054 100644 --- a/pkg/descheduler/strategies/duplicates_test.go +++ b/pkg/descheduler/strategies/duplicates_test.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "testing" "k8s.io/api/core/v1" @@ -31,6 +32,7 @@ import ( ) func TestFindDuplicatePods(t *testing.T) { + ctx := context.Background() // first setup pods node := test.BuildTestNode("n1", 2000, 3000, 10, nil) p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil) @@ -144,7 +146,7 @@ func TestFindDuplicatePods(t *testing.T) { []*v1.Node{node}, ) - RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor) + RemoveDuplicatePods(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor) podsEvicted := podEvictor.TotalEvicted() if podsEvicted != testCase.expectedEvictedPodCount { t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted) diff --git a/pkg/descheduler/strategies/lownodeutilization.go b/pkg/descheduler/strategies/lownodeutilization.go index ce3b4dd06..9fed3fdfe 100644 --- a/pkg/descheduler/strategies/lownodeutilization.go +++ b/pkg/descheduler/strategies/lownodeutilization.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "sort" v1 "k8s.io/api/core/v1" @@ -39,7 +40,7 @@ type NodeUsageMap struct { type NodePodsMap map[*v1.Node][]*v1.Pod -func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { if !strategy.Enabled { return } @@ -59,7 +60,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra return } - npm := createNodePodsMap(client, nodes) + npm := createNodePodsMap(ctx, client, nodes) lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods) klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v", @@ -91,6 +92,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes)) evictPodsFromTargetNodes( + ctx, targetNodes, lowNodes, targetThresholds, @@ -162,6 +164,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr // evicts them based on QoS as fallback option. // TODO: @ravig Break this function into smaller functions. func evictPodsFromTargetNodes( + ctx context.Context, targetNodes, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, evictLocalStoragePods bool, @@ -217,24 +220,26 @@ func evictPodsFromTargetNodes( // sort the evictable Pods based on priority. 
This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers. sortPodsBasedOnPriority(evictablePods) - evictPods(evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) + evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) } else { // TODO: Remove this when we support only priority. // Falling back to evicting pods based on priority. klog.V(1).Infof("Evicting pods based on QoS") klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods)) // evict best effort pods - evictPods(bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) + evictPods(ctx, bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) // evict burstable pods - evictPods(burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) + evictPods(ctx, burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) // evict guaranteed pods - evictPods(guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) + evictPods(ctx, guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node) } klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage) } } -func evictPods(inputPods []*v1.Pod, +func evictPods( + ctx context.Context, + inputPods []*v1.Pod, targetThresholds api.ResourceThresholds, nodeCapacity v1.ResourceList, nodeUsage api.ResourceThresholds, @@ -255,7 +260,7 @@ func evictPods(inputPods []*v1.Pod, cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU) mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory) - success, err := podEvictor.EvictPod(pod, node) + success, err := podEvictor.EvictPod(ctx, pod, node) if err != nil { break } @@ -325,10 +330,10 @@ func sortPodsBasedOnPriority(evictablePods []*v1.Pod) { } // createNodePodsMap returns nodepodsmap with evictable pods on node. -func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap { +func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap { npm := NodePodsMap{} for _, node := range nodes { - pods, err := podutil.ListPodsOnANode(client, node) + pods, err := podutil.ListPodsOnANode(ctx, client, node) if err != nil { klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err) } else { diff --git a/pkg/descheduler/strategies/lownodeutilization_test.go b/pkg/descheduler/strategies/lownodeutilization_test.go index a6aa22ac3..78441d715 100644 --- a/pkg/descheduler/strategies/lownodeutilization_test.go +++ b/pkg/descheduler/strategies/lownodeutilization_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package strategies import ( + "context" "fmt" "strings" "testing" @@ -69,6 +70,7 @@ func makeGuaranteedPod(pod *v1.Pod) { } func TestLowNodeUtilization(t *testing.T) { + ctx := context.Background() n1NodeName := "n1" n2NodeName := "n2" n3NodeName := "n3" @@ -344,7 +346,7 @@ func TestLowNodeUtilization(t *testing.T) { nodes = append(nodes, node) } - npm := createNodePodsMap(fakeClient, nodes) + npm := createNodePodsMap(ctx, fakeClient, nodes) lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false) if len(lowNodes) != 1 { t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.") @@ -357,7 +359,7 @@ func TestLowNodeUtilization(t *testing.T) { nodes, ) - evictPodsFromTargetNodes(targetNodes, lowNodes, test.targetThresholds, false, podEvictor) + evictPodsFromTargetNodes(ctx, targetNodes, lowNodes, test.targetThresholds, false, podEvictor) podsEvicted := podEvictor.TotalEvicted() if test.expectedPodsEvicted != podsEvicted { t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted) @@ -504,6 +506,7 @@ func newFake(objects ...runtime.Object) *core.Fake { } func TestWithTaints(t *testing.T) { + ctx := context.Background() strategy := api.DeschedulerStrategy{ Enabled: true, Params: api.StrategyParameters{ @@ -629,7 +632,7 @@ func TestWithTaints(t *testing.T) { item.nodes, ) - LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor) + LowNodeUtilization(ctx, &fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor) if item.evictionsExpected != evictionCounter { t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter) diff --git a/pkg/descheduler/strategies/node_affinity.go b/pkg/descheduler/strategies/node_affinity.go index b8783e836..fc122d717 100644 --- a/pkg/descheduler/strategies/node_affinity.go +++ b/pkg/descheduler/strategies/node_affinity.go @@ -17,6 +17,7 @@ limitations under the License. 
package strategies import ( + "context" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" @@ -27,7 +28,7 @@ import ( podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" ) -func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, nodeAffinity := range strategy.Params.NodeAffinityType { klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity) @@ -36,7 +37,7 @@ func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.De for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) + pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods) if err != nil { klog.Errorf("failed to get pods from %v: %v", node.Name, err) } @@ -45,7 +46,7 @@ func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.De if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) { klog.V(1).Infof("Evicting pod: %v", pod.Name) - if _, err := podEvictor.EvictPod(pod, node); err != nil { + if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil { break } } diff --git a/pkg/descheduler/strategies/node_affinity_test.go b/pkg/descheduler/strategies/node_affinity_test.go index 57cabdec8..06e5f5430 100644 --- a/pkg/descheduler/strategies/node_affinity_test.go +++ b/pkg/descheduler/strategies/node_affinity_test.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "testing" "k8s.io/api/core/v1" @@ -29,7 +30,7 @@ import ( ) func TestRemovePodsViolatingNodeAffinity(t *testing.T) { - + ctx := context.Background() requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{ Enabled: true, Params: api.StrategyParameters{ @@ -158,7 +159,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) { tc.nodes, ) - RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor) + RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, false, podEvictor) actualEvictedPodCount := podEvictor.TotalEvicted() if actualEvictedPodCount != tc.expectedEvictedPodCount { t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount) diff --git a/pkg/descheduler/strategies/node_taint.go b/pkg/descheduler/strategies/node_taint.go index 955760e28..691742f3c 100644 --- a/pkg/descheduler/strategies/node_taint.go +++ b/pkg/descheduler/strategies/node_taint.go @@ -17,6 +17,7 @@ limitations under the License. 
package strategies import ( + "context" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" @@ -28,10 +29,10 @@ import ( ) // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes -func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) + pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods) if err != nil { //no pods evicted as error encountered retrieving evictable Pods return @@ -44,7 +45,7 @@ func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.Desc func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule }, ) { klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name) - if _, err := podEvictor.EvictPod(pods[i], node); err != nil { + if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil { break } } diff --git a/pkg/descheduler/strategies/node_taint_test.go b/pkg/descheduler/strategies/node_taint_test.go index d6e1f5e02..46c89c5fd 100644 --- a/pkg/descheduler/strategies/node_taint_test.go +++ b/pkg/descheduler/strategies/node_taint_test.go @@ -1,6 +1,7 @@ package strategies import ( + "context" "fmt" "testing" @@ -43,7 +44,7 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod { } func TestDeletePodsViolatingNodeTaints(t *testing.T) { - + ctx := context.Background() node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) node1 = addTaintsToNode(node1, "testTaint", "test", []int{1}) node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil) @@ -171,7 +172,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) { tc.nodes, ) - RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor) + RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor) actualEvictedPodCount := podEvictor.TotalEvicted() if actualEvictedPodCount != tc.expectedEvictedPodCount { t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount) diff --git a/pkg/descheduler/strategies/pod_antiaffinity.go b/pkg/descheduler/strategies/pod_antiaffinity.go index cb787d940..7bfadada1 100644 --- a/pkg/descheduler/strategies/pod_antiaffinity.go +++ b/pkg/descheduler/strategies/pod_antiaffinity.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/descheduler/evictions" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" @@ -29,17 +30,17 @@ import ( ) // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules. 
-func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { for _, node := range nodes { klog.V(1).Infof("Processing node: %#v\n", node.Name) - pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) + pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods) if err != nil { return } totalPods := len(pods) for i := 0; i < totalPods; i++ { if checkPodsWithAntiAffinityExist(pods[i], pods) { - success, err := podEvictor.EvictPod(pods[i], node) + success, err := podEvictor.EvictPod(ctx, pods[i], node) if err != nil { break } diff --git a/pkg/descheduler/strategies/pod_antiaffinity_test.go b/pkg/descheduler/strategies/pod_antiaffinity_test.go index f3c9c2100..4c4febda3 100644 --- a/pkg/descheduler/strategies/pod_antiaffinity_test.go +++ b/pkg/descheduler/strategies/pod_antiaffinity_test.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "testing" "k8s.io/api/core/v1" @@ -30,6 +31,7 @@ import ( ) func TestPodAntiAffinity(t *testing.T) { + ctx := context.Background() node := test.BuildTestNode("n1", 2000, 3000, 10, nil) p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil) p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil) @@ -84,7 +86,7 @@ func TestPodAntiAffinity(t *testing.T) { []*v1.Node{node}, ) - RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor) + RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor) podsEvicted := podEvictor.TotalEvicted() if podsEvicted != test.expectedEvictedPodCount { t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount) diff --git a/pkg/descheduler/strategies/pod_lifetime.go b/pkg/descheduler/strategies/pod_lifetime.go index ad1f5d9d2..41a08ad17 100644 --- a/pkg/descheduler/strategies/pod_lifetime.go +++ b/pkg/descheduler/strategies/pod_lifetime.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" v1 "k8s.io/api/core/v1" v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -28,7 +29,7 @@ import ( ) // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago. 
-func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { if strategy.Params.MaxPodLifeTimeSeconds == nil { klog.V(1).Infof("MaxPodLifeTimeSeconds not set") return @@ -36,9 +37,9 @@ func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, n for _, node := range nodes { klog.V(1).Infof("Processing node: %#v", node.Name) - pods := listOldPodsOnNode(client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods) + pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods) for _, pod := range pods { - success, err := podEvictor.EvictPod(pod, node) + success, err := podEvictor.EvictPod(ctx, pod, node) if success { klog.V(1).Infof("Evicted pod: %#v\n because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds) } @@ -51,8 +52,8 @@ func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, n } } -func listOldPodsOnNode(client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod { - pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) +func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod { + pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods) if err != nil { return nil } diff --git a/pkg/descheduler/strategies/pod_lifetime_test.go b/pkg/descheduler/strategies/pod_lifetime_test.go index 062943e76..b389551b8 100644 --- a/pkg/descheduler/strategies/pod_lifetime_test.go +++ b/pkg/descheduler/strategies/pod_lifetime_test.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "testing" "time" @@ -31,6 +32,7 @@ import ( ) func TestPodLifeTime(t *testing.T) { + ctx := context.Background() node := test.BuildTestNode("n1", 2000, 3000, 10, nil) olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)) newerPodCreationTime := metav1.NewTime(time.Now()) @@ -157,7 +159,7 @@ func TestPodLifeTime(t *testing.T) { []*v1.Node{node}, ) - PodLifeTime(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor) + PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor) podsEvicted := podEvictor.TotalEvicted() if podsEvicted != tc.expectedEvictedPodCount { t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted) diff --git a/pkg/descheduler/strategies/toomanyrestarts.go b/pkg/descheduler/strategies/toomanyrestarts.go index 345778cba..ba5f5fcc0 100644 --- a/pkg/descheduler/strategies/toomanyrestarts.go +++ b/pkg/descheduler/strategies/toomanyrestarts.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" @@ -29,14 +30,14 @@ import ( // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node. // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings. 
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages. -func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { +func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) { if strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 { klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set") return } for _, node := range nodes { klog.V(1).Infof("Processing node: %s", node.Name) - pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods) + pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods) if err != nil { klog.Errorf("Error when list pods at node %s", node.Name) continue @@ -51,7 +52,7 @@ func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.De } else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold { continue } - if _, err := podEvictor.EvictPod(pods[i], node); err != nil { + if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil { break } } diff --git a/pkg/descheduler/strategies/toomanyrestarts_test.go b/pkg/descheduler/strategies/toomanyrestarts_test.go index bc2bf1cb2..b68863a87 100644 --- a/pkg/descheduler/strategies/toomanyrestarts_test.go +++ b/pkg/descheduler/strategies/toomanyrestarts_test.go @@ -17,6 +17,7 @@ limitations under the License. package strategies import ( + "context" "testing" "fmt" @@ -79,6 +80,7 @@ func initPods(node *v1.Node) []v1.Pod { } func TestRemovePodsHavingTooManyRestarts(t *testing.T) { + ctx := context.Background() createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy { return api.DeschedulerStrategy{ Enabled: enabled, @@ -171,7 +173,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) { []*v1.Node{node}, ) - RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor) + RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor) actualEvictedPodCount := podEvictor.TotalEvicted() if actualEvictedPodCount != tc.expectedEvictedPodCount { t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index bff92ef28..5e0c906f6 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "math" "testing" "time" @@ -95,7 +96,7 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr } // startEndToEndForLowNodeUtilization tests the lownode utilization strategy. 
-func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) { +func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) { var thresholds = make(deschedulerapi.ResourceThresholds) var targetThresholds = make(deschedulerapi.ResourceThresholds) thresholds[v1.ResourceMemory] = 20 @@ -110,7 +111,7 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor klog.Fatalf("%v", err) } stopChannel := make(chan struct{}) - nodes, err := nodeutil.ReadyNodes(clientset, nodeInformer, "", stopChannel) + nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", stopChannel) if err != nil { klog.Fatalf("%v", err) } @@ -133,18 +134,19 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor nodes, ) - strategies.LowNodeUtilization(clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor) + strategies.LowNodeUtilization(ctx, clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor) time.Sleep(10 * time.Second) } func TestE2E(t *testing.T) { // If we have reached here, it means cluster would have been already setup and the kubeconfig file should // be in /tmp directory as admin.conf. + ctx := context.Background() clientSet, err := client.CreateClient("/tmp/admin.conf") if err != nil { t.Errorf("Error during client creation with %v", err) } - nodeList, err := clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) if err != nil { t.Errorf("Error listing node with %v", err) } @@ -159,11 +161,11 @@ func TestE2E(t *testing.T) { // Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto master node. // So, the last node would have least utilization. 
rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil) - _, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc) + _, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{}) if err != nil { t.Errorf("Error creating deployment %v", err) } - evictPods(t, clientSet, nodeInformer, nodeList, rc) + evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc) rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"} rc.Spec.Replicas = func(i int32) *int32 { return &i }(15) @@ -176,14 +178,15 @@ func TestE2E(t *testing.T) { }, }, } - _, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc) + _, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{}) if err != nil { t.Errorf("Error creating deployment %v", err) } - evictPods(t, clientSet, nodeInformer, nodeList, rc) + evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc) } func TestDeschedulingInterval(t *testing.T) { + ctx := context.Background() clientSet, err := client.CreateClient("/tmp/admin.conf") if err != nil { t.Errorf("Error during client creation with %v", err) @@ -203,7 +206,7 @@ func TestDeschedulingInterval(t *testing.T) { } stopChannel := make(chan struct{}) - if err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel); err != nil { + if err := descheduler.RunDeschedulerStrategies(ctx, s, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel); err != nil { t.Errorf("Error running descheduler strategies: %+v", err) } c <- true @@ -217,7 +220,7 @@ func TestDeschedulingInterval(t *testing.T) { } } -func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) { +func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) { var leastLoadedNode v1.Node podsBefore := math.MaxInt16 for i := range nodeList.Items { @@ -226,7 +229,7 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf continue } // List all the pods on the current Node - podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true) + podsOnANode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &nodeList.Items[i], true) if err != nil { t.Errorf("Error listing pods on a node %v", err) } @@ -237,8 +240,8 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf } } t.Log("Eviction of pods starting") - startEndToEndForLowNodeUtilization(clientSet, nodeInformer) - podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true) + startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer) + podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &leastLoadedNode, true) if err != nil { t.Errorf("Error listing pods on a node %v", err) } @@ -249,14 +252,14 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf //set number of replicas to 0 rc.Spec.Replicas = func(i int32) *int32 { return &i }(0) - _, err = clientSet.CoreV1().ReplicationControllers("default").Update(rc) + _, err = clientSet.CoreV1().ReplicationControllers("default").Update(ctx, rc, metav1.UpdateOptions{}) if err != nil { t.Errorf("Error updating replica controller %v", err) } allPodsDeleted := 
false //wait 30 seconds until all pods are deleted for i := 0; i < 6; i++ { - scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(rc.Name, metav1.GetOptions{}) + scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(ctx, rc.Name, metav1.GetOptions{}) if scale.Spec.Replicas == 0 { allPodsDeleted = true break @@ -268,7 +271,7 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf t.Errorf("Deleting of rc pods took too long") } - err = clientSet.CoreV1().ReplicationControllers("default").Delete(rc.Name, &metav1.DeleteOptions{}) + err = clientSet.CoreV1().ReplicationControllers("default").Delete(ctx, rc.Name, metav1.DeleteOptions{}) if err != nil { t.Errorf("Error deleting rc %v", err) }
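
The diff above plumbs a `context.Context` from `Run` through `RunDeschedulerStrategies`, every strategy function, `PodEvictor.EvictPod`, and the client-go calls (`List`, `Update`, `Delete`, `GetScale`, `Evict`), which now also take explicit `metav1.ListOptions`/`UpdateOptions`/`DeleteOptions` values. The patch itself only ever passes `context.Background()`, but the new parameter is what makes cancellation possible. The sketch below is not part of the patch; it shows one way a caller could tie the context's lifetime to the existing `stopChannel`. The helper name `runWithCancellation`, the `package example` wrapper, and the `cmd/descheduler/app/options` import path are assumptions for illustration; the `RunDeschedulerStrategies` signature is the one introduced by this diff.

```go
package example

import (
	"context"

	"sigs.k8s.io/descheduler/cmd/descheduler/app/options" // assumed location of DeschedulerServer
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler"
)

// runWithCancellation is a hypothetical wrapper (not in this patch) that derives a
// cancellable context and cancels it once stopChannel is closed, so that any
// in-flight List/Update/Evict call made through the threaded ctx returns promptly
// instead of running to completion.
func runWithCancellation(rs *options.DeschedulerServer, policy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stopChannel := make(chan struct{})
	go func() {
		<-stopChannel // unblocks as soon as the channel is closed
		cancel()      // propagate shutdown to every call that received ctx
	}()

	// Signature as changed by this diff: ctx is now the first parameter.
	return descheduler.RunDeschedulerStrategies(ctx, rs, policy, evictionPolicyGroupVersion, stopChannel)
}
```

With wiring like this, closing `stopChannel` would both end the `wait.Until` loop (as it does today) and cancel whatever API call is still in flight, which is the practical payoff of threading the context through in the first place.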