mirror of https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
e2e: use kubernetes utils pointer library
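The change replaces hand-rolled int-pointer closures in the e2e tests with the typed helpers from k8s.io/utils/pointer. A minimal standalone sketch of the before/after idiom (illustrative only, not part of the diff below):

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// Before: an immediately-invoked closure, used only to take the
	// address of an int32 literal.
	before := func(i int32) *int32 { return &i }(4)

	// After: the k8s.io/utils/pointer helper expresses the same thing directly.
	after := pointer.Int32(4)

	fmt.Println(*before, *after) // 4 4
}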
 go.mod | 2 +-
@@ -15,6 +15,7 @@ require (
     k8s.io/component-helpers v0.24.0
     k8s.io/klog/v2 v2.60.1
     k8s.io/kubectl v0.20.5
+    k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
     sigs.k8s.io/mdtoc v1.0.1
 )
@@ -109,7 +110,6 @@ require (
     gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
     k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
     k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
-    k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
     sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
     sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
     sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
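Why go.mod changes in two places: once the e2e code imports k8s.io/utils/pointer directly, the module no longer belongs in the // indirect block, so the same version moves into the direct require section. Running go mod tidy after adding the import produces exactly this reshuffle (general Go modules behavior, stated here as context rather than taken from the commit).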
@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/utils/pointer"
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
@@ -90,7 +91,7 @@ func TestRemoveDuplicates(t *testing.T) {
             description: "Evict Pod even Pods schedule to specific node",
             replicasNum: 4,
             beforeFunc: func(deployment *appsv1.Deployment) {
-                deployment.Spec.Replicas = func(i int32) *int32 { return &i }(4)
+                deployment.Spec.Replicas = pointer.Int32(4)
                 deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
             },
             expectedEvictedPodCount: 2,
@@ -99,7 +100,7 @@ func TestRemoveDuplicates(t *testing.T) {
             description: "Evict Pod even Pods with local storage",
             replicasNum: 5,
             beforeFunc: func(deployment *appsv1.Deployment) {
-                deployment.Spec.Replicas = func(i int32) *int32 { return &i }(5)
+                deployment.Spec.Replicas = pointer.Int32(5)
                 deployment.Spec.Template.Spec.Volumes = []v1.Volume{
                     {
                         Name: "sample",
@@ -12,6 +12,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/utils/pointer"
 
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/strategies"
@@ -113,7 +114,6 @@ func initFailedJob(name, namespace string) *batchv1.Job {
     podSpec.Containers[0].Command = []string{"/bin/false"}
     podSpec.RestartPolicy = v1.RestartPolicyNever
     labelsSet := labels.Set{"test": name, "name": name}
-    jobBackoffLimit := int32(0)
     return &batchv1.Job{
         ObjectMeta: metav1.ObjectMeta{
             Labels:    labelsSet,
@@ -125,7 +125,7 @@ func initFailedJob(name, namespace string) *batchv1.Job {
                 Spec:       podSpec,
                 ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
             },
-            BackoffLimit: &jobBackoffLimit,
+            BackoffLimit: pointer.Int32(0),
         },
     }
 }
@@ -30,6 +30,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/utils/pointer"
 
     "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/descheduler"
@@ -153,7 +154,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
             Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
         },
         Spec: appsv1.DeploymentSpec{
-            Replicas: func(i int32) *int32 { return &i }(replicas),
+            Replicas: pointer.Int32(replicas),
             Selector: &metav1.LabelSelector{
                 MatchLabels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
             },
@@ -37,6 +37,7 @@ import (
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     v1qos "k8s.io/kubectl/pkg/util/qos"
+    "k8s.io/utils/pointer"
 
     "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
@@ -76,12 +77,10 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
 
 // RcByNameContainer returns a ReplicationController with specified name and container
 func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
-    zeroGracePeriod := int64(0)
-
     // Add "name": name to the labels, overwriting if it exists.
     labels["name"] = name
     if gracePeriod == nil {
-        gracePeriod = &zeroGracePeriod
+        gracePeriod = pointer.Int64(0)
     }
     return &v1.ReplicationController{
         TypeMeta: metav1.TypeMeta{
@@ -93,7 +92,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
             Namespace: namespace,
         },
         Spec: v1.ReplicationControllerSpec{
-            Replicas: func(i int32) *int32 { return &i }(replicas),
+            Replicas: pointer.Int32(replicas),
             Selector: map[string]string{
                 "name": name,
             },
@@ -981,7 +980,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) {
     oldestPod := podList.Items[0]
 
     t.Log("Scale the rs to 5 replicas with the 4 new pods having a more recent creation timestamp")
-    rc.Spec.Replicas = func(i int32) *int32 { return &i }(5)
+    rc.Spec.Replicas = pointer.Int32(5)
     rc, err = clientSet.CoreV1().ReplicationControllers(rc.Namespace).Update(ctx, rc, metav1.UpdateOptions{})
     if err != nil {
         t.Errorf("Error updating deployment %v", err)
@@ -1094,7 +1093,7 @@ func waitForTerminatingPodsToDisappear(ctx context.Context, t *testing.T, client
 func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
     // set number of replicas to 0
     rcdeepcopy := rc.DeepCopy()
-    rcdeepcopy.Spec.Replicas = func(i int32) *int32 { return &i }(0)
+    rcdeepcopy.Spec.Replicas = pointer.Int32(0)
     if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
         t.Fatalf("Error updating replica controller %v", err)
     }
@@ -28,6 +28,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/utils/pointer"
     deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
     "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
     eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
@@ -61,7 +62,7 @@ func TestTooManyRestarts(t *testing.T) {
             Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
         },
         Spec: appsv1.DeploymentSpec{
-            Replicas: func(i int32) *int32 { return &i }(4),
+            Replicas: pointer.Int32(4),
             Selector: &metav1.LabelSelector{
                 MatchLabels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
             },
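The library also covers the *int64 case that RcByNameContainer needs for termination grace periods. A minimal sketch, assuming the standard core/v1 PodSpec field; the surrounding values are illustrative, not taken from the diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func main() {
	// TerminationGracePeriodSeconds is *int64 in core/v1, so pointer.Int64
	// replaces the zeroGracePeriod := int64(0); &zeroGracePeriod pattern
	// removed in the RcByNameContainer hunk above.
	spec := v1.PodSpec{
		TerminationGracePeriodSeconds: pointer.Int64(0),
	}
	fmt.Println(*spec.TerminationGracePeriodSeconds) // 0
}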