Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
Bump to k8s 1.25-rc.0
@@ -21,6 +21,7 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	componentbaseconfig "k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -57,6 +58,6 @@ type DeschedulerConfiguration struct {
 	LeaderElection componentbaseconfig.LeaderElectionConfiguration

 	// Logging specifies the options of logging.
-	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
-	Logging componentbaseconfig.LoggingConfiguration
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
+	Logging registry.LoggingConfiguration
 }

@@ -19,9 +19,9 @@ package v1alpha1
 import (
 	"time"

-	componentbaseconfig "k8s.io/component-base/config"
-
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	componentbaseconfig "k8s.io/component-base/config"
+	registry "k8s.io/component-base/logs/api/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -58,6 +58,6 @@ type DeschedulerConfiguration struct {
 	LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

 	// Logging specifies the options of logging.
-	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
-	Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`
+	// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
+	Logging registry.LoggingConfiguration `json:"logging,omitempty"`
 }
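Both hunks above track the same upstream move: in the Kubernetes 1.25 line the LoggingConfiguration type lives in k8s.io/component-base/logs/api/v1 (imported here under the registry alias) rather than in k8s.io/component-base/config. A minimal sketch of embedding the relocated type the way DeschedulerConfiguration now does; the struct, field values, and program below are illustrative and not code from this repository:

package main

import (
	"fmt"

	// The same package the "registry" alias in the hunks above points at.
	logsapi "k8s.io/component-base/logs/api/v1"
)

// exampleConfig is a hypothetical stand-in for a component configuration that
// embeds the relocated LoggingConfiguration, mirroring the Logging field added
// to DeschedulerConfiguration above.
type exampleConfig struct {
	Logging logsapi.LoggingConfiguration
}

func main() {
	cfg := exampleConfig{}
	// Format is a field of the v1 LoggingConfiguration; "json" requests the
	// structured JSON log format instead of the default text format.
	cfg.Logging.Format = "json"
	fmt.Println("log format:", cfg.Logging.Format)
}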
@@ -23,7 +23,7 @@ import (
 	"k8s.io/klog/v2"

 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1beta1"
+	policy "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"

@@ -3,11 +3,13 @@ package descheduler
 import (
 	"context"
 	"fmt"
+	v1 "k8s.io/api/core/v1"
+	policy "k8s.io/api/policy/v1"
+	//policy "k8s.io/api/policy/v1beta1"
 	"strings"
 	"testing"
 	"time"

-	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
@@ -46,7 +48,7 @@ func TestTaintsUpdated(t *testing.T) {
 	errChan := make(chan error, 1)
 	defer close(errChan)
 	go func() {
-		err := RunDeschedulerStrategies(ctx, rs, dp, "v1beta1")
+		err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
 		errChan <- err
 	}()
 	select {
@@ -97,7 +99,87 @@ func TestTaintsUpdated(t *testing.T) {

 		return false, nil
 	}); err != nil {
-		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies")
+		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
 	}
 }

+func TestDuplicate(t *testing.T) {
+	ctx := context.Background()
+	node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
+	node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
+
+	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
+	p1.Namespace = "dev"
+	p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
+	p2.Namespace = "dev"
+	p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
+	p3.Namespace = "dev"
+
+	ownerRef1 := test.GetReplicaSetOwnerRefList()
+	p1.ObjectMeta.OwnerReferences = ownerRef1
+	p2.ObjectMeta.OwnerReferences = ownerRef1
+	p3.ObjectMeta.OwnerReferences = ownerRef1
+
+	client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
+	eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
+	dp := &api.DeschedulerPolicy{
+		Strategies: api.StrategyList{
+			"RemoveDuplicates": api.DeschedulerStrategy{
+				Enabled: true,
+			},
+		},
+	}
+
+	rs, err := options.NewDeschedulerServer()
+	if err != nil {
+		t.Fatalf("Unable to initialize server: %v", err)
+	}
+	rs.Client = client
+	rs.EventClient = eventClient
+	rs.DeschedulingInterval = 100 * time.Millisecond
+	errChan := make(chan error, 1)
+	defer close(errChan)
+	go func() {
+		err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
+		errChan <- err
+	}()
+	select {
+	case err := <-errChan:
+		if err != nil {
+			t.Fatalf("Unable to run descheduler strategies: %v", err)
+		}
+	case <-time.After(1 * time.Second):
+		// Wait for few cycles and then verify the only pod still exists
+	}
+
+	pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		t.Errorf("Unable to list pods: %v", err)
+	}
+
+	if len(pods.Items) != 3 {
+		t.Errorf("Pods number should be 3 before evict")
+	}
+
+	if err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, func() (bool, error) {
+		// Get over evicted pod result in panic
+		//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
+		// List is better, it does not panic.
+		// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
+		pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
+		if err == nil {
+			if len(pods.Items) > 2 {
+				return false, nil
+			}
+			return true, nil
+		}
+		if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
+			return true, nil
+		}
+
+		return false, nil
+	}); err != nil {
+		t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
+	}
+}
+
@@ -170,3 +252,54 @@ func TestRootCancelWithNoInterval(t *testing.T) {
 		t.Fatal("Root ctx should have canceled immediately")
 	}
 }
+
+func TestNewSimpleClientset(t *testing.T) {
+	client := fakeclientset.NewSimpleClientset()
+	client.CoreV1().Pods("default").Create(context.Background(), &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "pod-1",
+			Namespace: "default",
+		},
+	}, metav1.CreateOptions{})
+	client.CoreV1().Pods("default").Create(context.Background(), &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "pod-2",
+			Namespace: "default",
+		},
+	}, metav1.CreateOptions{})
+	err := client.CoreV1().Pods("default").EvictV1(context.Background(), &policy.Eviction{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pod-2",
+		},
+	})
+
+	if err != nil {
+		t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
+	}
+
+	pods, err := client.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
+	// err: item[0]: can't assign or convert v1beta1.Eviction into v1.Pod
+	if err != nil {
+		t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
+	} else {
+		t.Log(len(pods.Items))
+		t.Logf("TestNewSimpleClientset() res = %v", pods)
+	}
+
+	err = client.PolicyV1().Evictions("default").Evict(context.Background(), &policy.Eviction{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "pod-1",
+		},
+	})
+	if err != nil {
+		t.Fatalf(err.Error())
+	}
+	pods, err = client.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
+	// err: item[0]: can't assign or convert v1beta1.Eviction into v1.Pod
+	if err != nil {
+		t.Errorf("TestNewSimpleClientset() res = %v", err.Error())
+	} else {
+		t.Log(len(pods.Items))
+		t.Logf("TestNewSimpleClientset() res = %v", pods)
+	}
+}

@@ -21,7 +21,7 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1beta1"
+	policy "k8s.io/api/policy/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -193,7 +193,7 @@ func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, poli
 		},
 		DeleteOptions: deleteOptions,
 	}
-	err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction)
+	err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)

 	if apierrors.IsTooManyRequests(err) {
 		return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
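The eviction hunks are the functional core of the bump: the Eviction object now comes from k8s.io/api/policy/v1 and the subresource call goes through PolicyV1() instead of PolicyV1beta1(). A self-contained sketch of that call pattern follows; the package name, function name, and parameters are illustrative and not taken from the repository:

package example

import (
	"context"
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictOnePod is a hypothetical helper showing the policy/v1 eviction pattern
// used in the hunk above, assuming a standard client-go clientset.
func evictOnePod(ctx context.Context, client kubernetes.Interface, namespace, podName string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
		},
	}
	// The v1 call replaces the PolicyV1beta1().Evictions(...).Evict(...) line removed above.
	err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
	if apierrors.IsTooManyRequests(err) {
		// Typically means a PodDisruptionBudget is currently blocking the eviction.
		return fmt.Errorf("error when evicting pod (ignoring) %q: %v", podName, err)
	}
	return err
}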
@@ -6,7 +6,7 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	policy "k8s.io/api/policy/v1beta1"
+	policy "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"