mirror of https://github.com/kubernetes-sigs/descheduler.git

refactor: replace k8s.io/utils/pointer with k8s.io/utils/ptr

Signed-off-by: Emin Aktas <eminaktas34@gmail.com>
Emin Aktas
2024-07-11 11:17:13 +03:00
parent b614c8bc7c
commit f8e128d862
23 changed files with 309 additions and 120 deletions
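
The change follows a single pattern throughout: the deprecated per-type helpers in k8s.io/utils/pointer (pointer.Bool, pointer.Int32, pointer.Int64, ...) give way to the one generic helper ptr.To in k8s.io/utils/ptr. A minimal standalone sketch of the before/after styles (the old calls are kept as comments, since the pointer package is deprecated):

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// Before: one helper per type.
	//   replicas := pointer.Int32(4)
	//   nonRoot := pointer.Bool(true)

	// After: ptr.To[T](v T) *T covers every type.
	// The explicit type parameter matters for untyped constants:
	// ptr.To(4) would infer *int, but fields like Deployment
	// Spec.Replicas expect *int32.
	replicas := ptr.To[int32](4)
	nonRoot := ptr.To(true) // T inferred as bool from the argument

	fmt.Println(*replicas, *nonRoot) // prints: 4 true
}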

View File

@@ -32,8 +32,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/events"
"k8s.io/utils/pointer"
utilpointer "k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
)
@@ -75,9 +74,9 @@ func TestRemoveDuplicates(t *testing.T) {
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
- RunAsNonRoot: utilpointer.Bool(true),
- RunAsUser: utilpointer.Int64(1000),
- RunAsGroup: utilpointer.Int64(1000),
+ RunAsNonRoot: utilptr.To(true),
+ RunAsUser: utilptr.To[int64](1000),
+ RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
@@ -88,7 +87,7 @@ func TestRemoveDuplicates(t *testing.T) {
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
SecurityContext: &v1.SecurityContext{
- AllowPrivilegeEscalation: utilpointer.Bool(false),
+ AllowPrivilegeEscalation: utilptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{
"ALL",
@@ -112,7 +111,7 @@ func TestRemoveDuplicates(t *testing.T) {
description: "Evict Pod even Pods schedule to specific node",
replicasNum: 4,
beforeFunc: func(deployment *appsv1.Deployment) {
- deployment.Spec.Replicas = pointer.Int32(4)
+ deployment.Spec.Replicas = utilptr.To[int32](4)
deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
},
expectedEvictedPodCount: 2,
@@ -121,7 +120,7 @@ func TestRemoveDuplicates(t *testing.T) {
description: "Evict Pod even Pods with local storage",
replicasNum: 5,
beforeFunc: func(deployment *appsv1.Deployment) {
- deployment.Spec.Replicas = pointer.Int32(5)
+ deployment.Spec.Replicas = utilptr.To[int32](5)
deployment.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "sample",
@@ -139,7 +138,7 @@ func TestRemoveDuplicates(t *testing.T) {
description: "Ignores eviction with minReplicas of 4",
replicasNum: 3,
beforeFunc: func(deployment *appsv1.Deployment) {
- deployment.Spec.Replicas = pointer.Int32(3)
+ deployment.Spec.Replicas = utilptr.To[int32](3)
},
expectedEvictedPodCount: 0,
minReplicas: 4,

View File

@@ -12,7 +12,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
@@ -146,7 +146,7 @@ func initFailedJob(name, namespace string) *batchv1.Job {
Spec: podSpec,
ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
},
- BackoffLimit: pointer.Int32(0),
+ BackoffLimit: utilptr.To[int32](0),
},
}
}

View File

@@ -30,9 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
utilpointer "k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
)
@@ -155,7 +153,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
},
Spec: appsv1.DeploymentSpec{
- Replicas: pointer.Int32(replicas),
+ Replicas: utilptr.To[int32](replicas),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
},
@@ -165,9 +163,9 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
- RunAsNonRoot: utilpointer.Bool(true),
- RunAsUser: utilpointer.Int64(1000),
- RunAsGroup: utilpointer.Int64(1000),
+ RunAsNonRoot: utilptr.To(true),
+ RunAsUser: utilptr.To[int64](1000),
+ RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
@@ -178,7 +176,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
Image: "registry.k8s.io/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
SecurityContext: &v1.SecurityContext{
- AllowPrivilegeEscalation: utilpointer.Bool(false),
+ AllowPrivilegeEscalation: utilptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{
"ALL",

View File

@@ -39,8 +39,7 @@ import (
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/events"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/utils/pointer"
utilpointer "k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
@@ -64,7 +63,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
// Add "name": name to the labels, overwriting if it exists.
labels["name"] = name
if gracePeriod == nil {
- gracePeriod = pointer.Int64(0)
+ gracePeriod = utilptr.To[int64](0)
}
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
@@ -76,7 +75,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
Namespace: namespace,
},
Spec: v1.ReplicationControllerSpec{
- Replicas: pointer.Int32(replicas),
+ Replicas: utilptr.To[int32](replicas),
Selector: map[string]string{
"name": name,
},
@@ -95,7 +94,7 @@ func DsByNameContainer(name, namespace string, labels map[string]string, gracePe
// Add "name": name to the labels, overwriting if it exists.
labels["name"] = name
if gracePeriod == nil {
- gracePeriod = pointer.Int64(0)
+ gracePeriod = utilptr.To[int64](0)
}
return &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
@@ -312,9 +311,9 @@ func TestLowNodeUtilization(t *testing.T) {
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
- RunAsNonRoot: utilpointer.Bool(true),
- RunAsUser: utilpointer.Int64(1000),
- RunAsGroup: utilpointer.Int64(1000),
+ RunAsNonRoot: utilptr.To(true),
+ RunAsUser: utilptr.To[int64](1000),
+ RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
@@ -1102,7 +1101,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) {
oldestPod := podList.Items[0]
t.Log("Scale the rs to 5 replicas with the 4 new pods having a more recent creation timestamp")
- rc.Spec.Replicas = pointer.Int32(5)
+ rc.Spec.Replicas = utilptr.To[int32](5)
rc, err = clientSet.CoreV1().ReplicationControllers(rc.Namespace).Update(ctx, rc, metav1.UpdateOptions{})
if err != nil {
t.Errorf("Error updating deployment %v", err)
@@ -1252,7 +1251,7 @@ func deleteDS(ctx context.Context, t *testing.T, clientSet clientset.Interface,
func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
// set number of replicas to 0
rcdeepcopy := rc.DeepCopy()
- rcdeepcopy.Spec.Replicas = pointer.Int32(0)
+ rcdeepcopy.Spec.Replicas = utilptr.To[int32](0)
if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Error updating replica controller %v", err)
}
@@ -1401,9 +1400,9 @@ func createBalancedPodForNodes(
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
- RunAsNonRoot: utilpointer.Bool(true),
- RunAsUser: utilpointer.Int64(1000),
- RunAsGroup: utilpointer.Int64(1000),
+ RunAsNonRoot: utilptr.To(true),
+ RunAsUser: utilptr.To[int64](1000),
+ RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},

View File

@@ -29,9 +29,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/events"
"k8s.io/utils/pointer"
utilpointer "k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
@@ -66,7 +64,7 @@ func TestTooManyRestarts(t *testing.T) {
Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Spec: appsv1.DeploymentSpec{
- Replicas: pointer.Int32(4),
+ Replicas: utilptr.To[int32](4),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
@@ -76,9 +74,9 @@ func TestTooManyRestarts(t *testing.T) {
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
- RunAsNonRoot: utilpointer.Bool(true),
- RunAsUser: utilpointer.Int64(1000),
- RunAsGroup: utilpointer.Int64(1000),
+ RunAsNonRoot: utilptr.To(true),
+ RunAsUser: utilptr.To[int64](1000),
+ RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
@@ -91,7 +89,7 @@ func TestTooManyRestarts(t *testing.T) {
Args: []string{"-c", "sleep 1s && exit 1"},
Ports: []v1.ContainerPort{{ContainerPort: 80}},
SecurityContext: &v1.SecurityContext{
- AllowPrivilegeEscalation: utilpointer.Bool(false),
+ AllowPrivilegeEscalation: utilptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{
"ALL",

View File

@@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
utilpointer "k8s.io/utils/pointer"
utilptr "k8s.io/utils/ptr"
)
func BuildTestDeployment(name, namespace string, replicas int32, labels map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
@@ -48,7 +48,7 @@ func BuildTestDeployment(name, namespace string, replicas int32, labels map[stri
Namespace: namespace,
},
Spec: appsv1.DeploymentSpec{
- Replicas: utilpointer.Int32(replicas),
+ Replicas: utilptr.To[int32](replicas),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": name,
@@ -58,7 +58,7 @@ func BuildTestDeployment(name, namespace string, replicas int32, labels map[stri
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: MakePodSpec("", utilpointer.Int64(0)),
Spec: MakePodSpec("", utilptr.To[int64](0)),
},
},
}
@@ -174,9 +174,9 @@ func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node))
func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
return v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
- RunAsNonRoot: utilpointer.Bool(true),
- RunAsUser: utilpointer.Int64(1000),
- RunAsGroup: utilpointer.Int64(1000),
+ RunAsNonRoot: utilptr.To(true),
+ RunAsUser: utilptr.To[int64](1000),
+ RunAsGroup: utilptr.To[int64](1000),
SeccompProfile: &v1.SeccompProfile{
Type: v1.SeccompProfileTypeRuntimeDefault,
},
@@ -197,7 +197,7 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
},
},
SecurityContext: &v1.SecurityContext{
- AllowPrivilegeEscalation: utilpointer.Bool(false),
+ AllowPrivilegeEscalation: utilptr.To(false),
Capabilities: &v1.Capabilities{
Drop: []v1.Capability{
"ALL",
@@ -274,7 +274,7 @@ func SetNodeExtendedResource(node *v1.Node, resourceName v1.ResourceName, reques
func DeleteDeployment(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
// set number of replicas to 0
deploymentCopy := deployment.DeepCopy()
- deploymentCopy.Spec.Replicas = utilpointer.Int32(0)
+ deploymentCopy.Spec.Replicas = utilptr.To[int32](0)
if _, err := clientSet.AppsV1().Deployments(deploymentCopy.Namespace).Update(ctx, deploymentCopy, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Error updating replica controller %v", err)
}