Mirror of https://github.com/kubernetes-sigs/descheduler.git
React to 1.18 by adding contexts to client calls
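Background for this change, with a minimal sketch that is not part of the commit itself: client-go for Kubernetes 1.18 changed the generated clientset methods so that every call takes a context.Context as its first argument, and write calls also take an explicit options struct (metav1.CreateOptions, metav1.UpdateOptions, and so on). The call-site pattern the diff applies throughout looks roughly like this; the namespace and variable names are illustrative only:

    // Pre-1.18 client-go:
    pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})

    // 1.18 client-go: the caller supplies a context (and an options struct on writes).
    ctx := context.Background()
    pods, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
    _, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})

The commit therefore threads a single ctx from Run() through RunDeschedulerStrategies, the strategy functions, and the eviction helpers so that every client call receives it.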
@@ -17,6 +17,7 @@ limitations under the License.
 package descheduler
 
 import (
+    "context"
     "fmt"
 
     "k8s.io/api/core/v1"
@@ -35,6 +36,7 @@ import (
 )
 
 func Run(rs *options.DeschedulerServer) error {
+    ctx := context.Background()
     rsclient, err := client.CreateClient(rs.KubeconfigFile)
     if err != nil {
         return err
@@ -55,12 +57,12 @@ func Run(rs *options.DeschedulerServer) error {
     }
 
     stopChannel := make(chan struct{})
-    return RunDeschedulerStrategies(rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
+    return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
 }
 
-type strategyFunction func(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
+type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor)
 
-func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
+func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
     sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
     nodeInformer := sharedInformerFactory.Core().V1().Nodes()
 
@@ -78,7 +80,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
     }
 
     wait.Until(func() {
-        nodes, err := nodeutil.ReadyNodes(rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
+        nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, rs.NodeSelector, stopChannel)
         if err != nil {
             klog.V(1).Infof("Unable to get ready nodes: %v", err)
             close(stopChannel)
@@ -101,7 +103,7 @@ func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *
 
         for name, f := range strategyFuncs {
             if strategy := deschedulerPolicy.Strategies[api.StrategyName(name)]; strategy.Enabled {
-                f(rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
+                f(ctx, rs.Client, strategy, nodes, rs.EvictLocalStoragePods, podEvictor)
             }
         }
 
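A side note on the Run() hunk above, sketching a variation that is not in the commit: Run() builds the context with context.Background(), so cancellation still travels only through stopChannel. If in-flight API calls should also be cancelled when stopChannel closes, the same spot could wire the two together; the goroutine below is a hypothetical illustration, not repository code:

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    go func() {
        // hypothetical wiring: cancel outstanding client calls once the stop channel closes
        <-stopChannel
        cancel()
    }()
    return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)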
@@ -1,6 +1,7 @@
 package descheduler
 
 import (
+    "context"
     "fmt"
     "strings"
     "testing"
@@ -16,6 +17,7 @@ import (
 )
 
 func TestTaintsUpdated(t *testing.T) {
+    ctx := context.Background()
     n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
 
@@ -40,7 +42,7 @@ func TestTaintsUpdated(t *testing.T) {
     rs.Client = client
     rs.DeschedulingInterval = 100 * time.Millisecond
     go func() {
-        err := RunDeschedulerStrategies(rs, dp, "v1beta1", stopChannel)
+        err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1", stopChannel)
         if err != nil {
             t.Fatalf("Unable to run descheduler strategies: %v", err)
         }
@@ -48,7 +50,7 @@ func TestTaintsUpdated(t *testing.T) {
 
     // Wait for few cycles and then verify the only pod still exists
     time.Sleep(300 * time.Millisecond)
-    pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
+    pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
     if err != nil {
         t.Errorf("Unable to list pods: %v", err)
     }
@@ -65,7 +67,7 @@ func TestTaintsUpdated(t *testing.T) {
         },
     }
 
-    if _, err := client.CoreV1().Nodes().Update(n1WithTaint); err != nil {
+    if _, err := client.CoreV1().Nodes().Update(ctx, n1WithTaint, metav1.UpdateOptions{}); err != nil {
         t.Fatalf("Unable to update node: %v\n", err)
     }
 
@@ -74,7 +76,7 @@ func TestTaintsUpdated(t *testing.T) {
         //pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
         // List is better, it does not panic.
         // Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
-        pods, err := client.CoreV1().Pods(p1.Namespace).List(metav1.ListOptions{})
+        pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
         if err == nil {
             if len(pods.Items) > 0 {
                 return false, nil
@@ -17,6 +17,7 @@ limitations under the License.
 package evictions
 
 import (
+    "context"
     "fmt"
 
     v1 "k8s.io/api/core/v1"
@@ -82,12 +83,12 @@ func (pe *PodEvictor) TotalEvicted() int {
 // EvictPod returns non-nil error only when evicting a pod on a node is not
 // possible (due to maxPodsToEvict constraint). Success is true when the pod
 // is evicted on the server side.
-func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err error) {
+func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node) (success bool, err error) {
     if pe.maxPodsToEvict > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvict {
         return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvict, node.Name)
     }
 
-    success, err = EvictPod(pe.client, pod, pe.policyGroupVersion, pe.dryRun)
+    success, err = EvictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
     if success {
         pe.nodepodCount[node]++
         klog.V(1).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
@@ -98,7 +99,7 @@ func (pe *PodEvictor) EvictPod(pod *v1.Pod, node *v1.Node) (success bool, err er
     return false, nil
 }
 
-func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
+func EvictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
     if dryRun {
         return true, nil
     }
@@ -115,7 +116,7 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
         },
         DeleteOptions: deleteOptions,
     }
-    err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
+    err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
 
     if err == nil {
         eventBroadcaster := record.NewBroadcaster()
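A brief usage note on the EvictPod signature introduced above (a sketch under assumptions, not code from the repository): because the context is now explicit, a caller could bound a single eviction attempt with a timeout. The evictions package qualifier and the "v1" policy group version value are taken from the tests in this diff; the 30-second limit is an arbitrary example:

    // Hypothetical caller-side use: give one eviction attempt at most 30 seconds.
    evictCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
    defer cancel()
    success, err := evictions.EvictPod(evictCtx, client, pod, "v1", false)

Both success and err still need to be checked, exactly as the PodEvictor wrapper above does.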
@@ -17,6 +17,7 @@ limitations under the License.
 package evictions
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -27,6 +28,7 @@ import (
 )
 
 func TestEvictPod(t *testing.T) {
+    ctx := context.Background()
     node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
     pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
     tests := []struct {
@@ -57,7 +59,7 @@ func TestEvictPod(t *testing.T) {
         fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
             return true, &v1.PodList{Items: test.pods}, nil
         })
-        got, _ := EvictPod(fakeClient, test.pod, "v1", false)
+        got, _ := EvictPod(ctx, fakeClient, test.pod, "v1", false)
         if got != test.want {
             t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
         }
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+    "context"
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -28,7 +29,7 @@ import (
 
 // ReadyNodes returns ready nodes irrespective of whether they are
 // schedulable or not.
-func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
+func ReadyNodes(ctx context.Context, client clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeSelector string, stopChannel <-chan struct{}) ([]*v1.Node, error) {
     ns, err := labels.Parse(nodeSelector)
     if err != nil {
         return []*v1.Node{}, err
@@ -43,7 +44,7 @@ func ReadyNodes(client clientset.Interface, nodeInformer coreinformers.NodeInfor
     if len(nodes) == 0 {
         klog.V(2).Infof("node lister returned empty list, now fetch directly")
 
-        nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
+        nItems, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: nodeSelector})
         if err != nil {
             return []*v1.Node{}, err
         }
@@ -17,6 +17,7 @@ limitations under the License.
 package node
 
 import (
+    "context"
    "testing"
 
     "k8s.io/api/core/v1"
@@ -56,6 +57,7 @@ func TestReadyNodes(t *testing.T) {
 }
 
 func TestReadyNodesWithNodeSelector(t *testing.T) {
+    ctx := context.Background()
     node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
     node1.Labels = map[string]string{"type": "compute"}
     node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
@@ -72,7 +74,7 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
     sharedInformerFactory.WaitForCacheSync(stopChannel)
     defer close(stopChannel)
 
-    nodes, _ := ReadyNodes(fakeClient, nodeInformer, nodeSelector, nil)
+    nodes, _ := ReadyNodes(ctx, fakeClient, nodeInformer, nodeSelector, nil)
 
     if nodes[0].Name != "node1" {
         t.Errorf("Expected node1, got %s", nodes[0].Name)
@@ -17,6 +17,7 @@ limitations under the License.
 package pod
 
 import (
+    "context"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
@@ -38,8 +39,8 @@ func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
 }
 
 // ListEvictablePodsOnNode returns the list of evictable pods on node.
-func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
+func ListEvictablePodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) {
-    pods, err := ListPodsOnANode(client, node)
+    pods, err := ListPodsOnANode(ctx, client, node)
     if err != nil {
         return []*v1.Pod{}, err
     }
@@ -54,13 +55,13 @@ func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLoc
     return evictablePods, nil
 }
 
-func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
+func ListPodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) {
     fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
     if err != nil {
         return []*v1.Pod{}, err
     }
 
-    podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(
+    podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
         metav1.ListOptions{FieldSelector: fieldSelector.String()})
     if err != nil {
         return []*v1.Pod{}, err
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "strings"
 
     "k8s.io/api/core/v1"
@@ -32,6 +33,7 @@ import (
 // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
 func RemoveDuplicatePods(
+    ctx context.Context,
     client clientset.Interface,
     strategy api.DeschedulerStrategy,
     nodes []*v1.Node,
@@ -40,13 +42,13 @@ func RemoveDuplicatePods(
 ) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v", node.Name)
-        dpm := listDuplicatePodsOnANode(client, node, evictLocalStoragePods)
+        dpm := listDuplicatePodsOnANode(ctx, client, node, evictLocalStoragePods)
         for creator, pods := range dpm {
             if len(pods) > 1 {
                 klog.V(1).Infof("%#v", creator)
                 // i = 0 does not evict the first pod
                 for i := 1; i < len(pods); i++ {
-                    if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+                    if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
                         break
                     }
                 }
@@ -59,8 +61,8 @@ func RemoveDuplicatePods(
 type duplicatePodsMap map[string][]*v1.Pod
 
 // listDuplicatePodsOnANode lists duplicate pods on a given node.
-func listDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
+func listDuplicatePodsOnANode(ctx context.Context, client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) duplicatePodsMap {
-    pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+    pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
     if err != nil {
         return nil
     }
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -31,6 +32,7 @@ import (
 )
 
 func TestFindDuplicatePods(t *testing.T) {
+    ctx := context.Background()
     // first setup pods
     node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
@@ -144,7 +146,7 @@ func TestFindDuplicatePods(t *testing.T) {
             []*v1.Node{node},
         )
 
-        RemoveDuplicatePods(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
+        RemoveDuplicatePods(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
         podsEvicted := podEvictor.TotalEvicted()
         if podsEvicted != testCase.expectedEvictedPodCount {
             t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "sort"
 
     v1 "k8s.io/api/core/v1"
@@ -39,7 +40,7 @@ type NodeUsageMap struct {
 
 type NodePodsMap map[*v1.Node][]*v1.Pod
 
-func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     if !strategy.Enabled {
         return
     }
@@ -59,7 +60,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra
         return
     }
 
-    npm := createNodePodsMap(client, nodes)
+    npm := createNodePodsMap(ctx, client, nodes)
     lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, evictLocalStoragePods)
 
     klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
@@ -91,6 +92,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra
     klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
 
     evictPodsFromTargetNodes(
+        ctx,
         targetNodes,
         lowNodes,
         targetThresholds,
@@ -162,6 +164,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
// evicts them based on QoS as fallback option.
 // TODO: @ravig Break this function into smaller functions.
 func evictPodsFromTargetNodes(
+    ctx context.Context,
     targetNodes, lowNodes []NodeUsageMap,
     targetThresholds api.ResourceThresholds,
     evictLocalStoragePods bool,
@@ -217,24 +220,26 @@ func evictPodsFromTargetNodes(
 
             // sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
             sortPodsBasedOnPriority(evictablePods)
-            evictPods(evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+            evictPods(ctx, evictablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
         } else {
             // TODO: Remove this when we support only priority.
             // Falling back to evicting pods based on priority.
             klog.V(1).Infof("Evicting pods based on QoS")
             klog.V(1).Infof("There are %v non-evictable pods on the node", len(nonRemovablePods))
             // evict best effort pods
-            evictPods(bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+            evictPods(ctx, bestEffortPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
             // evict burstable pods
-            evictPods(burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+            evictPods(ctx, burstablePods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
             // evict guaranteed pods
-            evictPods(guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
+            evictPods(ctx, guaranteedPods, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, taintsOfLowNodes, podEvictor, node.node)
         }
         klog.V(1).Infof("%v pods evicted from node %#v with usage %v", podEvictor.NodeEvicted(node.node), node.node.Name, node.usage)
     }
 }
 
-func evictPods(inputPods []*v1.Pod,
+func evictPods(
+    ctx context.Context,
+    inputPods []*v1.Pod,
     targetThresholds api.ResourceThresholds,
     nodeCapacity v1.ResourceList,
     nodeUsage api.ResourceThresholds,
@@ -255,7 +260,7 @@ func evictPods(inputPods []*v1.Pod,
         cUsage := utils.GetResourceRequest(pod, v1.ResourceCPU)
         mUsage := utils.GetResourceRequest(pod, v1.ResourceMemory)
 
-        success, err := podEvictor.EvictPod(pod, node)
+        success, err := podEvictor.EvictPod(ctx, pod, node)
         if err != nil {
             break
         }
@@ -325,10 +330,10 @@ func sortPodsBasedOnPriority(evictablePods []*v1.Pod) {
 }
 
 // createNodePodsMap returns nodepodsmap with evictable pods on node.
-func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap {
+func createNodePodsMap(ctx context.Context, client clientset.Interface, nodes []*v1.Node) NodePodsMap {
     npm := NodePodsMap{}
     for _, node := range nodes {
-        pods, err := podutil.ListPodsOnANode(client, node)
+        pods, err := podutil.ListPodsOnANode(ctx, client, node)
         if err != nil {
             klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
         } else {
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "fmt"
     "strings"
     "testing"
@@ -69,6 +70,7 @@ func makeGuaranteedPod(pod *v1.Pod) {
 }
 
 func TestLowNodeUtilization(t *testing.T) {
+    ctx := context.Background()
     n1NodeName := "n1"
     n2NodeName := "n2"
     n3NodeName := "n3"
@@ -344,7 +346,7 @@ func TestLowNodeUtilization(t *testing.T) {
            nodes = append(nodes, node)
         }
 
-        npm := createNodePodsMap(fakeClient, nodes)
+        npm := createNodePodsMap(ctx, fakeClient, nodes)
         lowNodes, targetNodes := classifyNodes(npm, test.thresholds, test.targetThresholds, false)
         if len(lowNodes) != 1 {
             t.Errorf("After ignoring unschedulable nodes, expected only one node to be under utilized.")
@@ -357,7 +359,7 @@ func TestLowNodeUtilization(t *testing.T) {
             nodes,
         )
 
-        evictPodsFromTargetNodes(targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
+        evictPodsFromTargetNodes(ctx, targetNodes, lowNodes, test.targetThresholds, false, podEvictor)
         podsEvicted := podEvictor.TotalEvicted()
         if test.expectedPodsEvicted != podsEvicted {
             t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
@@ -504,6 +506,7 @@ func newFake(objects ...runtime.Object) *core.Fake {
 }
 
 func TestWithTaints(t *testing.T) {
+    ctx := context.Background()
     strategy := api.DeschedulerStrategy{
         Enabled: true,
         Params: api.StrategyParameters{
@@ -629,7 +632,7 @@ func TestWithTaints(t *testing.T) {
             item.nodes,
         )
 
-        LowNodeUtilization(&fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
+        LowNodeUtilization(ctx, &fake.Clientset{Fake: *fakePtr}, strategy, item.nodes, false, podEvictor)
 
         if item.evictionsExpected != evictionCounter {
             t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, evictionCounter)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "k8s.io/api/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
@@ -27,7 +28,7 @@ import (
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 )
 
-func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     for _, nodeAffinity := range strategy.Params.NodeAffinityType {
         klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
 
@@ -36,7 +37,7 @@ func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.De
         for _, node := range nodes {
             klog.V(1).Infof("Processing node: %#v\n", node.Name)
 
-            pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+            pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
             if err != nil {
                 klog.Errorf("failed to get pods from %v: %v", node.Name, err)
             }
@@ -45,7 +46,7 @@ func RemovePodsViolatingNodeAffinity(client clientset.Interface, strategy api.De
                 if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
                     if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
                         klog.V(1).Infof("Evicting pod: %v", pod.Name)
-                        if _, err := podEvictor.EvictPod(pod, node); err != nil {
+                        if _, err := podEvictor.EvictPod(ctx, pod, node); err != nil {
                             break
                         }
                     }
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -29,7 +30,7 @@ import (
 )
 
 func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
+    ctx := context.Background()
     requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
         Enabled: true,
         Params: api.StrategyParameters{
@@ -158,7 +159,7 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
             tc.nodes,
         )
 
-        RemovePodsViolatingNodeAffinity(fakeClient, tc.strategy, tc.nodes, false, podEvictor)
+        RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, false, podEvictor)
         actualEvictedPodCount := podEvictor.TotalEvicted()
         if actualEvictedPodCount != tc.expectedEvictedPodCount {
             t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -28,10 +29,10 @@ import (
 )
 
 // RemovePodsViolatingNodeTaints evicts pods on the node which violate NoSchedule Taints on nodes
-func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
-        pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+        pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
         if err != nil {
             //no pods evicted as error encountered retrieving evictable Pods
             return
@@ -44,7 +45,7 @@ func RemovePodsViolatingNodeTaints(client clientset.Interface, strategy api.Desc
                func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
            ) {
                klog.V(2).Infof("Not all taints with NoSchedule effect are tolerated after update for pod %v on node %v", pods[i].Name, node.Name)
-               if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+               if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
                    break
                }
            }
@@ -1,6 +1,7 @@
 package strategies
 
 import (
+    "context"
     "fmt"
     "testing"
 
@@ -43,7 +44,7 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
 }
 
 func TestDeletePodsViolatingNodeTaints(t *testing.T) {
+    ctx := context.Background()
     node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
     node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -171,7 +172,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
             tc.nodes,
         )
 
-        RemovePodsViolatingNodeTaints(fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
+        RemovePodsViolatingNodeTaints(ctx, fakeClient, api.DeschedulerStrategy{}, tc.nodes, tc.evictLocalStoragePods, podEvictor)
         actualEvictedPodCount := podEvictor.TotalEvicted()
         if actualEvictedPodCount != tc.expectedEvictedPodCount {
             t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -29,17 +30,17 @@ import (
 )
 
 // RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which are having a pod affinity rules.
-func RemovePodsViolatingInterPodAntiAffinity(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v\n", node.Name)
-        pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+        pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
         if err != nil {
             return
         }
         totalPods := len(pods)
         for i := 0; i < totalPods; i++ {
             if checkPodsWithAntiAffinityExist(pods[i], pods) {
-                success, err := podEvictor.EvictPod(pods[i], node)
+                success, err := podEvictor.EvictPod(ctx, pods[i], node)
                 if err != nil {
                     break
                 }
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "testing"
 
     "k8s.io/api/core/v1"
@@ -30,6 +31,7 @@ import (
 )
 
 func TestPodAntiAffinity(t *testing.T) {
+    ctx := context.Background()
     node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     p1 := test.BuildTestPod("p1", 100, 0, node.Name, nil)
     p2 := test.BuildTestPod("p2", 100, 0, node.Name, nil)
@@ -84,7 +86,7 @@ func TestPodAntiAffinity(t *testing.T) {
             []*v1.Node{node},
         )
 
-        RemovePodsViolatingInterPodAntiAffinity(fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
+        RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, api.DeschedulerStrategy{}, []*v1.Node{node}, false, podEvictor)
         podsEvicted := podEvictor.TotalEvicted()
         if podsEvicted != test.expectedEvictedPodCount {
             t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     v1 "k8s.io/api/core/v1"
     v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
@@ -28,7 +29,7 @@ import (
 )
 
 // PodLifeTime evicts pods on nodes that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
-func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     if strategy.Params.MaxPodLifeTimeSeconds == nil {
         klog.V(1).Infof("MaxPodLifeTimeSeconds not set")
         return
@@ -36,9 +37,9 @@ func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, n
 
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %#v", node.Name)
-        pods := listOldPodsOnNode(client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
+        pods := listOldPodsOnNode(ctx, client, node, *strategy.Params.MaxPodLifeTimeSeconds, evictLocalStoragePods)
         for _, pod := range pods {
-            success, err := podEvictor.EvictPod(pod, node)
+            success, err := podEvictor.EvictPod(ctx, pod, node)
             if success {
                 klog.V(1).Infof("Evicted pod: %#v\n because it was created more than %v seconds ago", pod.Name, *strategy.Params.MaxPodLifeTimeSeconds)
             }
@@ -51,8 +52,8 @@ func PodLifeTime(client clientset.Interface, strategy api.DeschedulerStrategy, n
     }
 }
 
-func listOldPodsOnNode(client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
+func listOldPodsOnNode(ctx context.Context, client clientset.Interface, node *v1.Node, maxAge uint, evictLocalStoragePods bool) []*v1.Pod {
-    pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+    pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
     if err != nil {
         return nil
     }
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "testing"
     "time"
 
@@ -31,6 +32,7 @@ import (
 )
 
 func TestPodLifeTime(t *testing.T) {
+    ctx := context.Background()
     node := test.BuildTestNode("n1", 2000, 3000, 10, nil)
     olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
     newerPodCreationTime := metav1.NewTime(time.Now())
@@ -157,7 +159,7 @@ func TestPodLifeTime(t *testing.T) {
             []*v1.Node{node},
         )
 
-        PodLifeTime(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
+        PodLifeTime(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
         podsEvicted := podEvictor.TotalEvicted()
         if podsEvicted != tc.expectedEvictedPodCount {
             t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "k8s.io/api/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/klog"
@@ -29,14 +30,14 @@ import (
 // RemovePodsHavingTooManyRestarts removes the pods that have too many restarts on node.
 // There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
 // As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
+func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, evictLocalStoragePods bool, podEvictor *evictions.PodEvictor) {
     if strategy.Params.PodsHavingTooManyRestarts == nil || strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold < 1 {
         klog.V(1).Infof("PodsHavingTooManyRestarts thresholds not set")
         return
     }
     for _, node := range nodes {
         klog.V(1).Infof("Processing node: %s", node.Name)
-        pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
+        pods, err := podutil.ListEvictablePodsOnNode(ctx, client, node, evictLocalStoragePods)
         if err != nil {
             klog.Errorf("Error when list pods at node %s", node.Name)
             continue
@@ -51,7 +52,7 @@ func RemovePodsHavingTooManyRestarts(client clientset.Interface, strategy api.De
             } else if restarts < strategy.Params.PodsHavingTooManyRestarts.PodRestartThreshold {
                 continue
             }
-            if _, err := podEvictor.EvictPod(pods[i], node); err != nil {
+            if _, err := podEvictor.EvictPod(ctx, pods[i], node); err != nil {
                 break
             }
         }
@@ -17,6 +17,7 @@ limitations under the License.
 package strategies
 
 import (
+    "context"
     "testing"
 
     "fmt"
@@ -79,6 +80,7 @@ func initPods(node *v1.Node) []v1.Pod {
 }
 
 func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
+    ctx := context.Background()
     createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32) api.DeschedulerStrategy {
         return api.DeschedulerStrategy{
             Enabled: enabled,
@@ -171,7 +173,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
             []*v1.Node{node},
         )
 
-        RemovePodsHavingTooManyRestarts(fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
+        RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, []*v1.Node{node}, false, podEvictor)
         actualEvictedPodCount := podEvictor.TotalEvicted()
         if actualEvictedPodCount != tc.expectedEvictedPodCount {
             t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
@@ -17,6 +17,7 @@ limitations under the License.
|
|||||||
package e2e
|
package e2e
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"math"
|
"math"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -95,7 +96,7 @@ func RcByNameContainer(name string, replicas int32, labels map[string]string, gr
|
|||||||
}
|
}
|
||||||
|
|
||||||
// startEndToEndForLowNodeUtilization tests the lownode utilization strategy.
|
// startEndToEndForLowNodeUtilization tests the lownode utilization strategy.
|
||||||
func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
|
func startEndToEndForLowNodeUtilization(ctx context.Context, clientset clientset.Interface, nodeInformer coreinformers.NodeInformer) {
|
||||||
var thresholds = make(deschedulerapi.ResourceThresholds)
|
var thresholds = make(deschedulerapi.ResourceThresholds)
|
||||||
var targetThresholds = make(deschedulerapi.ResourceThresholds)
|
var targetThresholds = make(deschedulerapi.ResourceThresholds)
|
||||||
thresholds[v1.ResourceMemory] = 20
|
thresholds[v1.ResourceMemory] = 20
|
||||||
@@ -110,7 +111,7 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor
|
|||||||
klog.Fatalf("%v", err)
|
klog.Fatalf("%v", err)
|
||||||
}
|
}
|
||||||
stopChannel := make(chan struct{})
|
stopChannel := make(chan struct{})
|
||||||
nodes, err := nodeutil.ReadyNodes(clientset, nodeInformer, "", stopChannel)
|
nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "", stopChannel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
klog.Fatalf("%v", err)
|
klog.Fatalf("%v", err)
|
||||||
}
|
}
|
||||||
@@ -133,18 +134,19 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface, nodeInfor
 		nodes,
 	)
 
-	strategies.LowNodeUtilization(clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
+	strategies.LowNodeUtilization(ctx, clientset, lowNodeUtilizationStrategy, nodes, false, podEvictor)
 	time.Sleep(10 * time.Second)
 }
 
 func TestE2E(t *testing.T) {
 	// If we have reached here, it means cluster would have been already setup and the kubeconfig file should
 	// be in /tmp directory as admin.conf.
+	ctx := context.Background()
 	clientSet, err := client.CreateClient("/tmp/admin.conf")
 	if err != nil {
 		t.Errorf("Error during client creation with %v", err)
 	}
-	nodeList, err := clientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+	nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 	if err != nil {
 		t.Errorf("Error listing node with %v", err)
 	}
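For reference, the calls in this hunk follow the client-go v0.18 convention that the commit migrates to: every request verb takes a context.Context as its first argument and an explicit options struct (metav1.ListOptions, metav1.CreateOptions, metav1.UpdateOptions, metav1.DeleteOptions) as its last. A minimal standalone sketch of that pattern, not part of this commit, is below; it reuses the /tmp/admin.conf kubeconfig path from the e2e test, and everything else (package main, the panics, the printed message) is illustrative only.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the kubeconfig used by the e2e tests.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/admin.conf")
	if err != nil {
		panic(err)
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()

	// Context first, options struct last — the call shape this commit adopts.
	nodes, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("cluster has %d nodes\n", len(nodes.Items))
}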
@@ -159,11 +161,11 @@ func TestE2E(t *testing.T) {
 	// Assumption: We would have 3 node cluster by now. Kubeadm brings all the master components onto master node.
 	// So, the last node would have least utilization.
 	rc := RcByNameContainer("test-rc", int32(15), map[string]string{"test": "app"}, nil)
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
+	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
 	if err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
-	evictPods(t, clientSet, nodeInformer, nodeList, rc)
+	evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc)
 
 	rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
 	rc.Spec.Replicas = func(i int32) *int32 { return &i }(15)
@@ -176,14 +178,15 @@ func TestE2E(t *testing.T) {
 			},
 		},
 	}
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(rc)
+	_, err = clientSet.CoreV1().ReplicationControllers("default").Create(ctx, rc, metav1.CreateOptions{})
 	if err != nil {
 		t.Errorf("Error creating deployment %v", err)
 	}
-	evictPods(t, clientSet, nodeInformer, nodeList, rc)
+	evictPods(ctx, t, clientSet, nodeInformer, nodeList, rc)
 }
 
 func TestDeschedulingInterval(t *testing.T) {
+	ctx := context.Background()
 	clientSet, err := client.CreateClient("/tmp/admin.conf")
 	if err != nil {
 		t.Errorf("Error during client creation with %v", err)
@@ -203,7 +206,7 @@ func TestDeschedulingInterval(t *testing.T) {
 	}
 
 	stopChannel := make(chan struct{})
-	if err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel); err != nil {
+	if err := descheduler.RunDeschedulerStrategies(ctx, s, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel); err != nil {
 		t.Errorf("Error running descheduler strategies: %+v", err)
 	}
 	c <- true
@@ -217,7 +220,7 @@ func TestDeschedulingInterval(t *testing.T) {
 	}
 }
 
-func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) {
+func evictPods(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer, nodeList *v1.NodeList, rc *v1.ReplicationController) {
 	var leastLoadedNode v1.Node
 	podsBefore := math.MaxInt16
 	for i := range nodeList.Items {
@@ -226,7 +229,7 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf
 			continue
 		}
 		// List all the pods on the current Node
-		podsOnANode, err := podutil.ListEvictablePodsOnNode(clientSet, &nodeList.Items[i], true)
+		podsOnANode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &nodeList.Items[i], true)
 		if err != nil {
 			t.Errorf("Error listing pods on a node %v", err)
 		}
@@ -237,8 +240,8 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf
 		}
 	}
 	t.Log("Eviction of pods starting")
-	startEndToEndForLowNodeUtilization(clientSet, nodeInformer)
-	podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(clientSet, &leastLoadedNode, true)
+	startEndToEndForLowNodeUtilization(ctx, clientSet, nodeInformer)
+	podsOnleastUtilizedNode, err := podutil.ListEvictablePodsOnNode(ctx, clientSet, &leastLoadedNode, true)
 	if err != nil {
 		t.Errorf("Error listing pods on a node %v", err)
 	}
@@ -249,14 +252,14 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf
 
 	//set number of replicas to 0
 	rc.Spec.Replicas = func(i int32) *int32 { return &i }(0)
-	_, err = clientSet.CoreV1().ReplicationControllers("default").Update(rc)
+	_, err = clientSet.CoreV1().ReplicationControllers("default").Update(ctx, rc, metav1.UpdateOptions{})
 	if err != nil {
 		t.Errorf("Error updating replica controller %v", err)
 	}
 	allPodsDeleted := false
 	//wait 30 seconds until all pods are deleted
 	for i := 0; i < 6; i++ {
-		scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(rc.Name, metav1.GetOptions{})
+		scale, _ := clientSet.CoreV1().ReplicationControllers("default").GetScale(ctx, rc.Name, metav1.GetOptions{})
 		if scale.Spec.Replicas == 0 {
 			allPodsDeleted = true
 			break
@@ -268,7 +271,7 @@ func evictPods(t *testing.T, clientSet clientset.Interface, nodeInformer coreinf
 		t.Errorf("Deleting of rc pods took too long")
 	}
 
-	err = clientSet.CoreV1().ReplicationControllers("default").Delete(rc.Name, &metav1.DeleteOptions{})
+	err = clientSet.CoreV1().ReplicationControllers("default").Delete(ctx, rc.Name, metav1.DeleteOptions{})
 	if err != nil {
 		t.Errorf("Error deleting rc %v", err)
 	}
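The e2e tests above pair a stopChannel with a fresh context.Background(). Not part of this commit, but as an illustration of how the two could be tied together, a sketch of a hypothetical helper that cancels the context once the stop channel is closed follows; contextForChannel and the timing in main are invented for the example.

package main

import (
	"context"
	"fmt"
	"time"
)

// contextForChannel returns a context that is cancelled once stopCh is closed.
// This is an illustrative helper, not code from the descheduler.
func contextForChannel(stopCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		select {
		case <-stopCh:
			cancel()
		case <-ctx.Done():
		}
	}()
	return ctx, cancel
}

func main() {
	stopChannel := make(chan struct{})
	ctx, cancel := contextForChannel(stopChannel)
	defer cancel()

	// Closing the stop channel cancels every API call made with ctx.
	go func() {
		time.Sleep(100 * time.Millisecond)
		close(stopChannel)
	}()

	<-ctx.Done()
	fmt.Println("context cancelled:", ctx.Err())
}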