Mirror of https://github.com/kubernetes-sigs/descheduler.git

remove pod security policy; additional policy/v1beta1 cleanup; use informers for descheduler unit tests

update go to 1.19 and helm kubernetes cluster to 1.25
bump -rc.0 to 1.25 GA
bump k8s utils library
bump golang-ci
use go 1.19 for helm github action
upgrade kubectl from 0.20 to 0.25

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
Authored by Amir Alavi on 2022-09-01 21:26:19 -04:00
parent c9b0fbe467
commit e8fae9a3b7
171 changed files with 23514 additions and 692 deletions
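
The test rework below drops the old poll-and-List checks for evicted pods (which, per the deleted comments, could panic or start failing with "can't assign or convert v1beta1.Eviction into v1.Pod" on the fake clientset) in favor of a reactor that records eviction requests directly, plus a single synchronous `RunDeschedulerStrategies` call. A minimal sketch of that capture pattern, assuming client-go's fake clientset and the policy/v1 `Eviction` type; the `main` wrapper and the `evictedPods` slice are illustrative only, not part of the commit:

```go
package main

import (
	"fmt"

	policy "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	client := fakeclientset.NewSimpleClientset()

	// Record the name of every pod an eviction is created for, then fall
	// through to the default reactor chain (handled == false).
	var evictedPods []string
	client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			return false, nil, nil
		}
		if create, ok := action.(core.CreateActionImpl); ok {
			if eviction, ok := create.Object.(*policy.Eviction); ok {
				evictedPods = append(evictedPods, eviction.GetName())
			}
		}
		return false, nil, nil
	})

	// ... run the descheduler code under test against `client` here ...

	fmt.Println("evicted pods:", evictedPods)
}
```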

View File

@@ -3,16 +3,15 @@ package descheduler
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
//policy "k8s.io/api/policy/v1beta1"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/runtime"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/test"
@@ -44,21 +43,6 @@ func TestTaintsUpdated(t *testing.T) {
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
// Wait for few cycles and then verify the only pod still exists
}
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -81,24 +65,14 @@ func TestTaintsUpdated(t *testing.T) {
t.Fatalf("Unable to update node: %v\n", err)
}
if err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
// Get over evicted pod result in panic
//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
// List is better, it does not panic.
// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err == nil {
if len(pods.Items) > 0 {
return false, nil
}
return true, nil
}
if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
return true, nil
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
return false, nil
}); err != nil {
if err := RunDeschedulerStrategies(ctx, rs, dp, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) != 1 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
@@ -136,21 +110,6 @@ func TestDuplicate(t *testing.T) {
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
// Wait for few cycles and then verify the only pod still exists
}
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -161,24 +120,14 @@ func TestDuplicate(t *testing.T) {
t.Errorf("Pods number should be 3 before evict")
}
if err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
// Get over evicted pod result in panic
//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
// List is better, it does not panic.
// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err == nil {
if len(pods.Items) > 2 {
return false, nil
}
return true, nil
}
if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
return true, nil
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
return false, nil
}); err != nil {
if err := RunDeschedulerStrategies(ctx, rs, dp, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) == 0 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
@@ -204,7 +153,7 @@ func TestRootCancel(t *testing.T) {
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
@@ -239,7 +188,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
@@ -253,53 +202,17 @@ func TestRootCancelWithNoInterval(t *testing.T) {
}
}
func TestNewSimpleClientset(t *testing.T) {
client := fakeclientset.NewSimpleClientset()
client.CoreV1().Pods("default").Create(context.Background(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: "default",
},
}, metav1.CreateOptions{})
client.CoreV1().Pods("default").Create(context.Background(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
Namespace: "default",
},
}, metav1.CreateOptions{})
err := client.CoreV1().Pods("default").EvictV1(context.Background(), &policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
},
})
if err != nil {
t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
}
pods, err := client.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
// err: item[0]: can't assign or convert v1beta1.Eviction into v1.Pod
if err != nil {
t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
} else {
t.Log(len(pods.Items))
t.Logf("TestNewSimpleClientset() res = %v", pods)
}
err = client.PolicyV1().Evictions("default").Evict(context.Background(), &policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
},
})
if err != nil {
t.Fatalf(err.Error())
}
pods, err = client.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
// err: item[0]: can't assign or convert v1beta1.Eviction into v1.Pod
if err != nil {
t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
} else {
t.Log(len(pods.Items))
t.Logf("TestNewSimpleClientset() res = %v", pods)
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
*evictedPods = append(*evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
}
}
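
For context on what the deleted `TestNewSimpleClientset` above was probing: it exercised the two policy/v1 eviction entry points against the fake clientset and documented that a subsequent `List` could fail with "can't assign or convert v1beta1.Eviction into v1.Pod". A minimal sketch of those entry points under the same assumptions; the pod names are placeholders and the fake tracker's post-eviction behavior is deliberately not asserted:

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
)

func main() {
	ctx := context.Background()
	client := fakeclientset.NewSimpleClientset(
		&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "default"}},
		&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod-2", Namespace: "default"}},
	)

	// policy/v1 eviction through the pods/eviction subresource helper.
	err := client.CoreV1().Pods("default").EvictV1(ctx, &policy.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-2", Namespace: "default"},
	})
	fmt.Println("EvictV1:", err)

	// The same operation through the typed policy/v1 client.
	err = client.PolicyV1().Evictions("default").Evict(ctx, &policy.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "default"},
	})
	fmt.Println("Evict:", err)
}
```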

View File

@@ -22,7 +22,7 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -319,7 +319,7 @@ func TestHighNodeUtilization(t *testing.T) {
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
//These won't be evicted
// These won't be evicted
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
@@ -467,7 +467,7 @@ func TestHighNodeUtilization(t *testing.T) {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*v1beta1.Eviction); ok {
if eviction, ok := obj.(*policy.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
@@ -561,7 +561,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
name: "No taints",
nodes: []*v1.Node{n1, n2, n3},
pods: []*v1.Pod{
//Node 1 pods
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
@@ -574,7 +574,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
name: "No pod tolerates node taint",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
// Node 3 pods
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
@@ -585,7 +585,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
name: "Pod which tolerates node taint",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 100, 0, n1.Name, test.SetRSOwnerRef),
podThatToleratesTaint,
// Node 3 pods
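
The node-utilization tests in this file and the next admit an eviction only for pods the case expects to lose; with the evictor now targeting policy/v1, the reactor's type assertion moves from `*v1beta1.Eviction` to `*policy.Eviction` so it matches the objects the evictor actually sends. A self-contained sketch of that selective reactor, with a hypothetical `podsForEviction` set standing in for the per-test expectations:

```go
package main

import (
	"fmt"

	policy "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	// Hypothetical set of pod names the test expects to see evicted.
	podsForEviction := map[string]struct{}{"p1": {}, "p2": {}}

	fakeClient := fakeclientset.NewSimpleClientset()
	fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "eviction" {
			return false, nil, nil
		}
		obj := action.(core.CreateAction).GetObject()
		// Assert the policy/v1 Eviction type, matching the group/version
		// handed to the evictor in these tests.
		if eviction, ok := obj.(*policy.Eviction); ok {
			if _, expected := podsForEviction[eviction.Name]; expected {
				return true, obj, nil // admit expected evictions
			}
			return true, nil, fmt.Errorf("unexpected eviction of %q", eviction.Name)
		}
		return false, nil, nil
	})

	// The strategy under test would now run against fakeClient.
}
```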

View File

@@ -19,15 +19,15 @@ package nodeutilization
import (
"context"
"fmt"
"testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/api/policy/v1beta1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -733,7 +733,7 @@ func TestLowNodeUtilization(t *testing.T) {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*v1beta1.Eviction); ok {
if eviction, ok := obj.(*policy.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
@@ -751,7 +751,7 @@ func TestLowNodeUtilization(t *testing.T) {
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
policy.SchemeGroupVersion.String(),
false,
nil,
nil,
@@ -832,7 +832,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
name: "No taints",
nodes: []*v1.Node{n1, n2, n3},
pods: []*v1.Pod{
//Node 1 pods
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
@@ -850,7 +850,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
name: "No pod tolerates node taint",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
@@ -868,7 +868,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
name: "Pod which tolerates node taint",
nodes: []*v1.Node{n1, n3withTaints},
pods: []*v1.Pod{
//Node 1 pods
// Node 1 pods
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
@@ -911,7 +911,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
policy.SchemeGroupVersion.String(),
false,
&item.evictionsExpected,
nil,
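
For reference, the `policy.SchemeGroupVersion.String()` value now passed to `NewPodEvictor` resolves to the plain group/version string; a trivial standalone check, not part of the commit:

```go
package main

import (
	"fmt"

	policy "k8s.io/api/policy/v1"
)

func main() {
	// The group/version string the tests hand to the pod evictor.
	fmt.Println(policy.SchemeGroupVersion.String()) // "policy/v1"
}
```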