Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
Merge pull request #1464 from eminaktas/update-pointer-to-ptr
refactor: replace k8s.io/utils/pointer with k8s.io/utils/ptr
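For context: k8s.io/utils/pointer exposes one helper per type (pointer.Bool, pointer.Int32, pointer.Uint, pointer.BoolDeref, ...), while k8s.io/utils/ptr collapses them into the generic ptr.To and ptr.Deref. A minimal sketch of the pattern this diff applies throughout (variable names here are illustrative, not taken from the repository):

package main

import (
	"fmt"

	utilptr "k8s.io/utils/ptr"
)

func main() {
	// Before: utilpointer.Int32(100) / utilpointer.Bool(true)
	// After: one generic constructor; the explicit [int32] keeps the
	// literal from being inferred as plain int.
	thresholdPriority := utilptr.To[int32](100)
	nodeFit := utilptr.To(true) // bool is inferred, no type parameter needed

	// Before: utilpointer.BoolDeref(p, true)
	// After: generic Deref returns the default when the pointer is nil.
	var unset *bool
	fmt.Println(*thresholdPriority, *nodeFit, utilptr.Deref(unset, true))
}

An explicit type parameter (To[int32], To[uint], To[int64]) is only needed where an untyped literal would otherwise infer a different type; To(true) and To(false) infer bool on their own, which is exactly the split visible in the hunks below.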
go.mod (2 lines changed)
@@ -21,7 +21,7 @@ require (
 	k8s.io/component-base v0.30.0
 	k8s.io/component-helpers v0.30.0
 	k8s.io/klog/v2 v2.120.1
-	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
 	sigs.k8s.io/mdtoc v1.1.0
 )
go.sum (4 lines changed)
@@ -334,8 +334,8 @@ k8s.io/kms v0.30.0 h1:ZlnD/ei5lpvUlPw6eLfVvH7d8i9qZ6HwUQgydNVks8g=
 k8s.io/kms v0.30.0/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
-k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
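(A pin bump like this would typically be produced by something like `go get k8s.io/utils@18e509b52bc8` followed by `go mod tidy` and `go mod vendor`, which would also account for the vendor/ changes at the bottom of this diff; the exact commands are an assumption, not part of the commit.)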
@@ -21,7 +21,7 @@ import (

 	v1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
@@ -180,7 +180,7 @@ var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*a
 			Namespaces:             v1alpha1NamespacesToInternal(params.Namespaces),
 			LabelSelector:          params.LabelSelector,
 			Constraints:            constraints,
-			TopologyBalanceNodeFit: utilpointer.Bool(true),
+			TopologyBalanceNodeFit: utilptr.To(true),
 		}
 		if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
 			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
@@ -22,7 +22,7 @@ import (

 	"github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
@@ -51,7 +51,7 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeTaints(t *testing.T) {
 					"dedicated=special-user",
 					"reserved",
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -116,12 +116,12 @@ func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
 			description: "wire in all valid parameters",
 			params: &StrategyParameters{
 				FailedPods: &FailedPods{
-					MinPodLifetimeSeconds:   utilpointer.Uint(3600),
+					MinPodLifetimeSeconds:   utilptr.To[uint](3600),
 					ExcludeOwnerKinds:       []string{"Job"},
 					Reasons:                 []string{"NodeAffinity"},
 					IncludingInitContainers: true,
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -131,7 +131,7 @@ func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
 				Name: removefailedpods.PluginName,
 				Args: &removefailedpods.RemoveFailedPodsArgs{
 					ExcludeOwnerKinds:       []string{"Job"},
-					MinPodLifetimeSeconds:   utilpointer.Uint(3600),
+					MinPodLifetimeSeconds:   utilptr.To[uint](3600),
 					Reasons:                 []string{"NodeAffinity"},
 					IncludingInitContainers: true,
 					Namespaces: &api.Namespaces{
@@ -189,7 +189,7 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeAffinity(t *testing.T)
 			description: "wire in all valid parameters",
 			params: &StrategyParameters{
 				NodeAffinityType:  []string{"requiredDuringSchedulingIgnoredDuringExecution"},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -260,7 +260,7 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingInterPodAntiAffinity(t *te
 		{
 			description: "wire in all valid parameters",
 			params: &StrategyParameters{
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -327,7 +327,7 @@ func TestStrategyParamsToPluginArgsRemovePodsHavingTooManyRestarts(t *testing.T)
 					PodRestartThreshold:     100,
 					IncludingInitContainers: true,
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -403,13 +403,13 @@ func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
 			description: "wire in all valid parameters",
 			params: &StrategyParameters{
 				PodLifeTime: &PodLifeTime{
-					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
+					MaxPodLifeTimeSeconds: utilptr.To[uint](86400),
 					States: []string{
 						"Pending",
 						"PodInitializing",
 					},
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -418,7 +418,7 @@ func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
 			result: &api.PluginConfig{
 				Name: podlifetime.PluginName,
 				Args: &podlifetime.PodLifeTimeArgs{
-					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
+					MaxPodLifeTimeSeconds: utilptr.To[uint](86400),
 					States: []string{
 						"Pending",
 						"PodInitializing",
@@ -433,7 +433,7 @@ func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
 			description: "invalid params namespaces",
 			params: &StrategyParameters{
 				PodLifeTime: &PodLifeTime{
-					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
+					MaxPodLifeTimeSeconds: utilptr.To[uint](86400),
 				},
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
@@ -491,7 +491,7 @@ func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
 				RemoveDuplicates: &RemoveDuplicates{
 					ExcludeOwnerKinds: []string{"ReplicaSet"},
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -511,7 +511,7 @@ func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
 			description: "invalid params namespaces",
 			params: &StrategyParameters{
 				PodLifeTime: &PodLifeTime{
-					MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
+					MaxPodLifeTimeSeconds: utilptr.To[uint](86400),
 				},
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
@@ -559,7 +559,7 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t
 			description: "wire in all valid parameters",
 			params: &StrategyParameters{
 				IncludeSoftConstraints: true,
-				ThresholdPriority:      utilpointer.Int32(100),
+				ThresholdPriority:      utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -569,7 +569,7 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t
 				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
 				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
 					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
-					TopologyBalanceNodeFit: utilpointer.Bool(true),
+					TopologyBalanceNodeFit: utilptr.To(true),
 					Namespaces: &api.Namespaces{
 						Exclude: []string{"test1"},
 					},
@@ -586,7 +586,7 @@ func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t
 				Name: removepodsviolatingtopologyspreadconstraint.PluginName,
 				Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
 					Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
-					TopologyBalanceNodeFit: utilpointer.Bool(true),
+					TopologyBalanceNodeFit: utilptr.To(true),
 				},
 			},
 		},
@@ -646,7 +646,7 @@ func TestStrategyParamsToPluginArgsHighNodeUtilization(t *testing.T) {
 						"pods": Percentage(20),
 					},
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -759,7 +759,7 @@ func TestStrategyParamsToPluginArgsLowNodeUtilization(t *testing.T) {
 					},
 					UseDeviationThresholds: true,
 				},
-				ThresholdPriority: utilpointer.Int32(100),
+				ThresholdPriority: utilptr.To[int32](100),
 				Namespaces: &Namespaces{
 					Exclude: []string{"test1"},
 				},
@@ -27,7 +27,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/events"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 	"sigs.k8s.io/descheduler/test"
@@ -127,7 +127,7 @@ func TestNewPodEvictor(t *testing.T) {
 	podEvictor := NewPodEvictor(
 		fakeClient,
 		eventRecorder,
-		NewOptions().WithMaxPodsToEvictPerNode(utilpointer.Uint(1)),
+		NewOptions().WithMaxPodsToEvictPerNode(utilptr.To[uint](1)),
 	)

 	stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
@@ -24,7 +24,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/conversion"
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
 	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
@@ -154,11 +154,11 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 		{
 			description: "convert global policy fields to defaultevictor",
 			policy: &v1alpha1.DeschedulerPolicy{
-				EvictFailedBarePods:     utilpointer.Bool(true),
-				EvictLocalStoragePods:   utilpointer.Bool(true),
-				EvictSystemCriticalPods: utilpointer.Bool(true),
-				EvictDaemonSetPods:      utilpointer.Bool(true),
-				IgnorePVCPods:           utilpointer.Bool(true),
+				EvictFailedBarePods:     utilptr.To(true),
+				EvictLocalStoragePods:   utilptr.To(true),
+				EvictSystemCriticalPods: utilptr.To(true),
+				EvictDaemonSetPods:      utilptr.To(true),
+				IgnorePVCPods:           utilptr.To(true),
 				Strategies: v1alpha1.StrategyList{
 					removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
 						Enabled: true,
@@ -484,7 +484,7 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 						Name: removepodsviolatingtopologyspreadconstraint.PluginName,
 						Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
 							Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
-							TopologyBalanceNodeFit: utilpointer.Bool(true),
+							TopologyBalanceNodeFit: utilptr.To(true),
 						},
 					},
 				},
@@ -550,7 +550,7 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 						Enabled: true,
 						Params: &v1alpha1.StrategyParameters{
 							FailedPods: &v1alpha1.FailedPods{
-								MinPodLifetimeSeconds:   utilpointer.Uint(3600),
+								MinPodLifetimeSeconds:   utilptr.To[uint](3600),
 								ExcludeOwnerKinds:       []string{"Job"},
 								Reasons:                 []string{"NodeAffinity"},
 								IncludingInitContainers: true,
@@ -684,7 +684,7 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 						Name: removefailedpods.PluginName,
 						Args: &removefailedpods.RemoveFailedPodsArgs{
 							ExcludeOwnerKinds:       []string{"Job"},
-							MinPodLifetimeSeconds:   utilpointer.Uint(3600),
+							MinPodLifetimeSeconds:   utilptr.To[uint](3600),
 							Reasons:                 []string{"NodeAffinity"},
 							IncludingInitContainers: true,
 						},
@@ -801,7 +801,7 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 						Name: removepodsviolatingtopologyspreadconstraint.PluginName,
 						Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
 							Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
-							TopologyBalanceNodeFit: utilpointer.Bool(true),
+							TopologyBalanceNodeFit: utilptr.To(true),
 						},
 					},
 				},
@@ -837,7 +837,7 @@ func TestV1alpha1ToV1alpha2(t *testing.T) {
 				nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
 					Enabled: true,
 					Params: &v1alpha1.StrategyParameters{
-						ThresholdPriority:          utilpointer.Int32(100),
+						ThresholdPriority:          utilptr.To[int32](100),
 						ThresholdPriorityClassName: "name",
 						NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
 							Thresholds: v1alpha1.ResourceThresholds{
@@ -888,7 +888,7 @@ func TestDecodeVersionedPolicy(t *testing.T) {
 					Name: defaultevictor.PluginName,
 					Args: &defaultevictor.DefaultEvictorArgs{
 						PriorityThreshold: &api.PriorityThreshold{
-							Value: utilpointer.Int32(utils.SystemCriticalPriority),
+							Value: utilptr.To[int32](utils.SystemCriticalPriority),
 						},
 					},
 				}
@@ -925,7 +925,7 @@ strategies:
 						Namespaces: &api.Namespaces{
 							Include: []string{"testleaderelection-a"},
 						},
-						MaxPodLifeTimeSeconds: utilpointer.Uint(5),
+						MaxPodLifeTimeSeconds: utilptr.To[uint](5),
 					},
 				},
 			},
@@ -969,7 +969,7 @@ strategies:
 					Name: "DefaultEvictor",
 					Args: &defaultevictor.DefaultEvictorArgs{
 						PriorityThreshold: &api.PriorityThreshold{
-							Value: utilpointer.Int32(0),
+							Value: utilptr.To[int32](0),
 						},
 					},
 				},
@@ -979,7 +979,7 @@ strategies:
 						Namespaces: &api.Namespaces{
 							Include: []string{"testleaderelection-a"},
 						},
-						MaxPodLifeTimeSeconds: utilpointer.Uint(5),
+						MaxPodLifeTimeSeconds: utilptr.To[uint](5),
 					},
 				},
 			},
@@ -1052,7 +1052,7 @@ profiles:
 						EvictFailedBarePods:   true,
 						EvictLocalStoragePods: true,
 						EvictDaemonSetPods:    true,
-						PriorityThreshold:     &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
+						PriorityThreshold:     &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
 						NodeFit:               true,
 					},
 				},
@@ -1209,14 +1209,14 @@ profiles:
 						EvictFailedBarePods:   true,
 						EvictLocalStoragePods: true,
 						EvictDaemonSetPods:    true,
-						PriorityThreshold:     &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
+						PriorityThreshold:     &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
 						NodeFit:               true,
 					},
 				},
 				{
 					Name: removefailedpods.PluginName,
 					Args: &removefailedpods.RemoveFailedPodsArgs{
-						MinPodLifetimeSeconds: utilpointer.Uint(3600),
+						MinPodLifetimeSeconds: utilptr.To[uint](3600),
 					},
 				},
 			},
@@ -1267,14 +1267,14 @@ profiles:
 						EvictFailedBarePods:   true,
 						EvictLocalStoragePods: true,
 						EvictDaemonSetPods:    true,
-						PriorityThreshold:     &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
+						PriorityThreshold:     &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
 						NodeFit:               true,
 					},
 				},
 				{
 					Name: removefailedpods.PluginName,
 					Args: &removefailedpods.RemoveFailedPodsArgs{
-						MinPodLifetimeSeconds: utilpointer.Uint(3600),
+						MinPodLifetimeSeconds: utilptr.To[uint](3600),
 					},
 				},
 			},
@@ -19,7 +19,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 )

@@ -55,7 +55,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
 				EvictFailedBarePods: true,
 				LabelSelector:       nil,
 				PriorityThreshold: &api.PriorityThreshold{
-					Value: pointer.Int32(800),
+					Value: utilptr.To[int32](800),
 				},
 				NodeFit: true,
 			},
@@ -68,7 +68,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
 				EvictFailedBarePods: true,
 				LabelSelector:       nil,
 				PriorityThreshold: &api.PriorityThreshold{
-					Value: pointer.Int32(800),
+					Value: utilptr.To[int32](800),
 				},
 				NodeFit: true,
 			},
@@ -20,7 +20,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 )

@@ -47,7 +47,7 @@ func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{"foo": "bar"},
 				},
-				MaxPodLifeTimeSeconds: pointer.Uint(600),
+				MaxPodLifeTimeSeconds: utilptr.To[uint](600),
 				States:                []string{"Pending"},
 			},
 			want: &PodLifeTimeArgs{
@@ -55,7 +55,7 @@ func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
 				LabelSelector: &metav1.LabelSelector{
 					MatchLabels: map[string]string{"foo": "bar"},
 				},
-				MaxPodLifeTimeSeconds: pointer.Uint(600),
+				MaxPodLifeTimeSeconds: utilptr.To[uint](600),
 				States:                []string{"Pending"},
 			},
 		},
@@ -15,7 +15,7 @@ package removefailedpods

 import (
 	"k8s.io/apimachinery/pkg/runtime"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 )

 func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -36,7 +36,7 @@ func SetDefaults_RemoveFailedPodsArgs(obj runtime.Object) {
 		args.ExcludeOwnerKinds = nil
 	}
 	if args.MinPodLifetimeSeconds == nil {
-		args.MinPodLifetimeSeconds = utilpointer.Uint(3600)
+		args.MinPodLifetimeSeconds = utilptr.To[uint](3600)
 	}
 	if args.Reasons == nil {
 		args.Reasons = nil
@@ -20,7 +20,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 )

@@ -48,7 +48,7 @@ func TestSetDefaults_RemoveFailedPodsArgs(t *testing.T) {
 				Namespaces:              &api.Namespaces{},
 				LabelSelector:           &metav1.LabelSelector{},
 				ExcludeOwnerKinds:       []string{"ReplicaSet"},
-				MinPodLifetimeSeconds:   pointer.Uint(0),
+				MinPodLifetimeSeconds:   utilptr.To[uint](0),
 				Reasons:                 []string{"reason"},
 				IncludingInitContainers: true,
 			},
@@ -56,7 +56,7 @@ func TestSetDefaults_RemoveFailedPodsArgs(t *testing.T) {
 				Namespaces:              &api.Namespaces{},
 				LabelSelector:           &metav1.LabelSelector{},
 				ExcludeOwnerKinds:       []string{"ReplicaSet"},
-				MinPodLifetimeSeconds:   pointer.Uint(0),
+				MinPodLifetimeSeconds:   utilptr.To[uint](0),
 				Reasons:                 []string{"reason"},
 				IncludingInitContainers: true,
 			},
@@ -16,7 +16,7 @@ package removepodsviolatingtopologyspreadconstraint
 import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 )

 func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -34,7 +34,7 @@ func SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(obj runtime.Obj
 		args.LabelSelector = nil
 	}
 	if args.TopologyBalanceNodeFit == nil {
-		args.TopologyBalanceNodeFit = utilpointer.Bool(true)
+		args.TopologyBalanceNodeFit = utilptr.To(true)
 	}
 	if len(args.Constraints) == 0 {
 		args.Constraints = append(args.Constraints, v1.DoNotSchedule)
@@ -21,7 +21,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/api"
 )

@@ -48,7 +48,7 @@ func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.
 				Namespaces:             nil,
 				LabelSelector:          nil,
 				Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
-				TopologyBalanceNodeFit: utilpointer.Bool(true),
+				TopologyBalanceNodeFit: utilptr.To(true),
 			},
 		},
 		{
@@ -62,7 +62,7 @@ func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.
 				Namespaces:             &api.Namespaces{},
 				LabelSelector:          &metav1.LabelSelector{},
 				Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
-				TopologyBalanceNodeFit: utilpointer.Bool(true),
+				TopologyBalanceNodeFit: utilptr.To(true),
 			},
 		},
 		{
@@ -70,16 +70,16 @@ func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.
 			in:   &RemovePodsViolatingTopologySpreadConstraintArgs{},
 			want: &RemovePodsViolatingTopologySpreadConstraintArgs{
 				Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
-				TopologyBalanceNodeFit: utilpointer.Bool(true),
+				TopologyBalanceNodeFit: utilptr.To(true),
 			},
 		},
 		{
 			name: "RemovePodsViolatingTopologySpreadConstraintArgs with TopologyBalanceNodeFit=false",
 			in: &RemovePodsViolatingTopologySpreadConstraintArgs{
-				TopologyBalanceNodeFit: utilpointer.Bool(false),
+				TopologyBalanceNodeFit: utilptr.To(false),
 			},
 			want: &RemovePodsViolatingTopologySpreadConstraintArgs{
-				TopologyBalanceNodeFit: utilpointer.Bool(false),
+				TopologyBalanceNodeFit: utilptr.To(false),
 				Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
 			},
 		},
@@ -90,7 +90,7 @@ func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.
 			},
 			want: &RemovePodsViolatingTopologySpreadConstraintArgs{
 				Constraints:            []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
-				TopologyBalanceNodeFit: utilpointer.Bool(true),
+				TopologyBalanceNodeFit: utilptr.To(true),
 			},
 		},
 	}
@@ -26,7 +26,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/klog/v2"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"

 	v1helper "k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
@@ -313,7 +313,7 @@ func (d *RemovePodsViolatingTopologySpreadConstraint) balanceDomains(
 	isEvictable := d.handle.Evictor().Filter
 	sortedDomains := sortDomains(constraintTopologies, isEvictable)
 	getPodsAssignedToNode := d.handle.GetPodsAssignedToNodeFunc()
-	topologyBalanceNodeFit := utilpointer.BoolDeref(d.args.TopologyBalanceNodeFit, true)
+	topologyBalanceNodeFit := utilptr.Deref(d.args.TopologyBalanceNodeFit, true)

 	eligibleNodes := filterEligibleNodes(nodes, tsc)
 	nodesBelowIdealAvg := filterNodesBelowIdealAvg(eligibleNodes, sortedDomains, tsc.TopologyKey, idealAvg)
@@ -16,7 +16,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/events"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"

 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -1205,7 +1205,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
 			}),
 			expectedEvictedCount: 1,
 			namespaces:           []string{"ns1"},
-			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{TopologyBalanceNodeFit: utilpointer.Bool(false)},
+			args:                 RemovePodsViolatingTopologySpreadConstraintArgs{TopologyBalanceNodeFit: utilptr.To(false)},
 			nodeFit:              true,
 		},
 		{
@@ -32,8 +32,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/events"
-	"k8s.io/utils/pointer"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 )
@@ -75,9 +74,9 @@ func TestRemoveDuplicates(t *testing.T) {
 			},
 			Spec: v1.PodSpec{
 				SecurityContext: &v1.PodSecurityContext{
-					RunAsNonRoot: utilpointer.Bool(true),
-					RunAsUser:    utilpointer.Int64(1000),
-					RunAsGroup:   utilpointer.Int64(1000),
+					RunAsNonRoot: utilptr.To(true),
+					RunAsUser:    utilptr.To[int64](1000),
+					RunAsGroup:   utilptr.To[int64](1000),
 					SeccompProfile: &v1.SeccompProfile{
 						Type: v1.SeccompProfileTypeRuntimeDefault,
 					},
@@ -88,7 +87,7 @@ func TestRemoveDuplicates(t *testing.T) {
 						Image: "registry.k8s.io/pause",
 						Ports: []v1.ContainerPort{{ContainerPort: 80}},
 						SecurityContext: &v1.SecurityContext{
-							AllowPrivilegeEscalation: utilpointer.Bool(false),
+							AllowPrivilegeEscalation: utilptr.To(false),
 							Capabilities: &v1.Capabilities{
 								Drop: []v1.Capability{
 									"ALL",
@@ -112,7 +111,7 @@ func TestRemoveDuplicates(t *testing.T) {
 			description: "Evict Pod even Pods schedule to specific node",
 			replicasNum: 4,
 			beforeFunc: func(deployment *appsv1.Deployment) {
-				deployment.Spec.Replicas = pointer.Int32(4)
+				deployment.Spec.Replicas = utilptr.To[int32](4)
 				deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
 			},
 			expectedEvictedPodCount: 2,
@@ -121,7 +120,7 @@ func TestRemoveDuplicates(t *testing.T) {
 			description: "Evict Pod even Pods with local storage",
 			replicasNum: 5,
 			beforeFunc: func(deployment *appsv1.Deployment) {
-				deployment.Spec.Replicas = pointer.Int32(5)
+				deployment.Spec.Replicas = utilptr.To[int32](5)
 				deployment.Spec.Template.Spec.Volumes = []v1.Volume{
 					{
 						Name: "sample",
@@ -139,7 +138,7 @@ func TestRemoveDuplicates(t *testing.T) {
 			description: "Ignores eviction with minReplicas of 4",
 			replicasNum: 3,
 			beforeFunc: func(deployment *appsv1.Deployment) {
-				deployment.Spec.Replicas = pointer.Int32(3)
+				deployment.Spec.Replicas = utilptr.To[int32](3)
 			},
 			expectedEvictedPodCount: 0,
 			minReplicas:             4,
@@ -12,7 +12,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
 	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
@@ -146,7 +146,7 @@ func initFailedJob(name, namespace string) *batchv1.Job {
 				Spec:       podSpec,
 				ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
 			},
-			BackoffLimit: pointer.Int32(0),
+			BackoffLimit: utilptr.To[int32](0),
 		},
 	}
 }
@@ -30,9 +30,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/utils/pointer"
-
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/descheduler"
 )
@@ -155,7 +153,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
 			Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
 		},
 		Spec: appsv1.DeploymentSpec{
-			Replicas: pointer.Int32(replicas),
+			Replicas: utilptr.To[int32](replicas),
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
 			},
@@ -165,9 +163,9 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
 				},
 				Spec: v1.PodSpec{
 					SecurityContext: &v1.PodSecurityContext{
-						RunAsNonRoot: utilpointer.Bool(true),
-						RunAsUser:    utilpointer.Int64(1000),
-						RunAsGroup:   utilpointer.Int64(1000),
+						RunAsNonRoot: utilptr.To(true),
+						RunAsUser:    utilptr.To[int64](1000),
+						RunAsGroup:   utilptr.To[int64](1000),
 						SeccompProfile: &v1.SeccompProfile{
 							Type: v1.SeccompProfileTypeRuntimeDefault,
 						},
@@ -178,7 +176,7 @@ func createDeployment(ctx context.Context, clientSet clientset.Interface, namesp
 							Image: "registry.k8s.io/pause",
 							Ports: []v1.ContainerPort{{ContainerPort: 80}},
 							SecurityContext: &v1.SecurityContext{
-								AllowPrivilegeEscalation: utilpointer.Bool(false),
+								AllowPrivilegeEscalation: utilptr.To(false),
 								Capabilities: &v1.Capabilities{
 									Drop: []v1.Capability{
 										"ALL",
@@ -39,8 +39,7 @@ import (
 	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/events"
 	componentbaseconfig "k8s.io/component-base/config"
-	"k8s.io/utils/pointer"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
@@ -64,7 +63,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
 	// Add "name": name to the labels, overwriting if it exists.
 	labels["name"] = name
 	if gracePeriod == nil {
-		gracePeriod = pointer.Int64(0)
+		gracePeriod = utilptr.To[int64](0)
 	}
 	return &v1.ReplicationController{
 		TypeMeta: metav1.TypeMeta{
@@ -76,7 +75,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
 			Namespace: namespace,
 		},
 		Spec: v1.ReplicationControllerSpec{
-			Replicas: pointer.Int32(replicas),
+			Replicas: utilptr.To[int32](replicas),
 			Selector: map[string]string{
 				"name": name,
 			},
@@ -95,7 +94,7 @@ func DsByNameContainer(name, namespace string, labels map[string]string, gracePe
 	// Add "name": name to the labels, overwriting if it exists.
 	labels["name"] = name
 	if gracePeriod == nil {
-		gracePeriod = pointer.Int64(0)
+		gracePeriod = utilptr.To[int64](0)
 	}
 	return &appsv1.DaemonSet{
 		TypeMeta: metav1.TypeMeta{
@@ -312,9 +311,9 @@ func TestLowNodeUtilization(t *testing.T) {
 			},
 			Spec: v1.PodSpec{
 				SecurityContext: &v1.PodSecurityContext{
-					RunAsNonRoot: utilpointer.Bool(true),
-					RunAsUser:    utilpointer.Int64(1000),
-					RunAsGroup:   utilpointer.Int64(1000),
+					RunAsNonRoot: utilptr.To(true),
+					RunAsUser:    utilptr.To[int64](1000),
+					RunAsGroup:   utilptr.To[int64](1000),
 					SeccompProfile: &v1.SeccompProfile{
 						Type: v1.SeccompProfileTypeRuntimeDefault,
 					},
@@ -1102,7 +1101,7 @@ func TestPodLifeTimeOldestEvicted(t *testing.T) {
 	oldestPod := podList.Items[0]

 	t.Log("Scale the rs to 5 replicas with the 4 new pods having a more recent creation timestamp")
-	rc.Spec.Replicas = pointer.Int32(5)
+	rc.Spec.Replicas = utilptr.To[int32](5)
 	rc, err = clientSet.CoreV1().ReplicationControllers(rc.Namespace).Update(ctx, rc, metav1.UpdateOptions{})
 	if err != nil {
 		t.Errorf("Error updating deployment %v", err)
@@ -1252,7 +1251,7 @@ func deleteDS(ctx context.Context, t *testing.T, clientSet clientset.Interface,
 func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
 	// set number of replicas to 0
 	rcdeepcopy := rc.DeepCopy()
-	rcdeepcopy.Spec.Replicas = pointer.Int32(0)
+	rcdeepcopy.Spec.Replicas = utilptr.To[int32](0)
 	if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
 		t.Fatalf("Error updating replica controller %v", err)
 	}
@@ -1401,9 +1400,9 @@ func createBalancedPodForNodes(
 			},
 			Spec: v1.PodSpec{
 				SecurityContext: &v1.PodSecurityContext{
-					RunAsNonRoot: utilpointer.Bool(true),
-					RunAsUser:    utilpointer.Int64(1000),
-					RunAsGroup:   utilpointer.Int64(1000),
+					RunAsNonRoot: utilptr.To(true),
+					RunAsUser:    utilptr.To[int64](1000),
+					RunAsGroup:   utilptr.To[int64](1000),
 					SeccompProfile: &v1.SeccompProfile{
 						Type: v1.SeccompProfileTypeRuntimeDefault,
 					},
@@ -29,9 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/events"
-	"k8s.io/utils/pointer"
-
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
@@ -66,7 +64,7 @@ func TestTooManyRestarts(t *testing.T) {
 			Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
 		},
 		Spec: appsv1.DeploymentSpec{
-			Replicas: pointer.Int32(4),
+			Replicas: utilptr.To[int32](4),
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
 			},
@@ -76,9 +74,9 @@ func TestTooManyRestarts(t *testing.T) {
 				},
 				Spec: v1.PodSpec{
 					SecurityContext: &v1.PodSecurityContext{
-						RunAsNonRoot: utilpointer.Bool(true),
-						RunAsUser:    utilpointer.Int64(1000),
-						RunAsGroup:   utilpointer.Int64(1000),
+						RunAsNonRoot: utilptr.To(true),
+						RunAsUser:    utilptr.To[int64](1000),
+						RunAsGroup:   utilptr.To[int64](1000),
 						SeccompProfile: &v1.SeccompProfile{
 							Type: v1.SeccompProfileTypeRuntimeDefault,
 						},
@@ -91,7 +89,7 @@ func TestTooManyRestarts(t *testing.T) {
 							Args:  []string{"-c", "sleep 1s && exit 1"},
 							Ports: []v1.ContainerPort{{ContainerPort: 80}},
 							SecurityContext: &v1.SecurityContext{
-								AllowPrivilegeEscalation: utilpointer.Bool(false),
+								AllowPrivilegeEscalation: utilptr.To(false),
 								Capabilities: &v1.Capabilities{
 									Drop: []v1.Capability{
 										"ALL",
@@ -31,7 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	utilpointer "k8s.io/utils/pointer"
+	utilptr "k8s.io/utils/ptr"
 )

 func BuildTestDeployment(name, namespace string, replicas int32, labels map[string]string, apply func(deployment *appsv1.Deployment)) *appsv1.Deployment {
@@ -48,7 +48,7 @@ func BuildTestDeployment(name, namespace string, replicas int32, labels map[stri
 			Namespace: namespace,
 		},
 		Spec: appsv1.DeploymentSpec{
-			Replicas: utilpointer.Int32(replicas),
+			Replicas: utilptr.To[int32](replicas),
 			Selector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{
 					"name": name,
@@ -58,7 +58,7 @@ func BuildTestDeployment(name, namespace string, replicas int32, labels map[stri
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: labels,
 				},
-				Spec: MakePodSpec("", utilpointer.Int64(0)),
+				Spec: MakePodSpec("", utilptr.To[int64](0)),
 			},
 		},
 	}
@@ -174,9 +174,9 @@ func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node))
 func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
 	return v1.PodSpec{
 		SecurityContext: &v1.PodSecurityContext{
-			RunAsNonRoot: utilpointer.Bool(true),
-			RunAsUser:    utilpointer.Int64(1000),
-			RunAsGroup:   utilpointer.Int64(1000),
+			RunAsNonRoot: utilptr.To(true),
+			RunAsUser:    utilptr.To[int64](1000),
+			RunAsGroup:   utilptr.To[int64](1000),
 			SeccompProfile: &v1.SeccompProfile{
 				Type: v1.SeccompProfileTypeRuntimeDefault,
 			},
@@ -197,7 +197,7 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
 				},
 			},
 			SecurityContext: &v1.SecurityContext{
-				AllowPrivilegeEscalation: utilpointer.Bool(false),
+				AllowPrivilegeEscalation: utilptr.To(false),
 				Capabilities: &v1.Capabilities{
 					Drop: []v1.Capability{
 						"ALL",
@@ -274,7 +274,7 @@ func SetNodeExtendedResource(node *v1.Node, resourceName v1.ResourceName, reques
 func DeleteDeployment(ctx context.Context, t *testing.T, clientSet clientset.Interface, deployment *appsv1.Deployment) {
 	// set number of replicas to 0
 	deploymentCopy := deployment.DeepCopy()
-	deploymentCopy.Spec.Replicas = utilpointer.Int32(0)
+	deploymentCopy.Spec.Replicas = utilptr.To[int32](0)
 	if _, err := clientSet.AppsV1().Deployments(deploymentCopy.Namespace).Update(ctx, deploymentCopy, metav1.UpdateOptions{}); err != nil {
 		t.Fatalf("Error updating replica controller %v", err)
 	}
vendor/k8s.io/utils/net/multi_listen.go (new file, 195 lines, generated, vendored)
@@ -0,0 +1,195 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"sync"
+)
+
+// connErrPair pairs conn and error which is returned by accept on sub-listeners.
+type connErrPair struct {
+	conn net.Conn
+	err  error
+}
+
+// multiListener implements net.Listener
+type multiListener struct {
+	listeners []net.Listener
+	wg        sync.WaitGroup
+
+	// connCh passes accepted connections, from child listeners to parent.
+	connCh chan connErrPair
+	// stopCh communicates from parent to child listeners.
+	stopCh chan struct{}
+}
+
+// compile time check to ensure *multiListener implements net.Listener
+var _ net.Listener = &multiListener{}
+
+// MultiListen returns net.Listener which can listen on and accept connections for
+// the given network on multiple addresses. Internally it uses stdlib to create
+// sub-listener and multiplexes connection requests using go-routines.
+// The network must be "tcp", "tcp4" or "tcp6".
+// It follows the semantics of net.Listen that primarily means:
+//  1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen
+//     listens on all available unicast and anycast IP addresses of the local system.
+//  2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively.
+//  3. The host can accept names (e.g, localhost) and it will create a listener for at
+//     most one of the host's IP.
+func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) {
+	var lc net.ListenConfig
+	return multiListen(
+		ctx,
+		network,
+		addrs,
+		func(ctx context.Context, network, address string) (net.Listener, error) {
+			return lc.Listen(ctx, network, address)
+		})
+}
+
+// multiListen implements MultiListen by consuming stdlib functions as dependency allowing
+// mocking for unit-testing.
+func multiListen(
+	ctx context.Context,
+	network string,
+	addrs []string,
+	listenFunc func(ctx context.Context, network, address string) (net.Listener, error),
+) (net.Listener, error) {
+	if !(network == "tcp" || network == "tcp4" || network == "tcp6") {
+		return nil, fmt.Errorf("network %q not supported", network)
+	}
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("no address provided to listen on")
+	}
+
+	ml := &multiListener{
+		connCh: make(chan connErrPair),
+		stopCh: make(chan struct{}),
+	}
+	for _, addr := range addrs {
+		l, err := listenFunc(ctx, network, addr)
+		if err != nil {
+			// close all the sub-listeners and exit
+			_ = ml.Close()
+			return nil, err
+		}
+		ml.listeners = append(ml.listeners, l)
+	}
+
+	for _, l := range ml.listeners {
+		ml.wg.Add(1)
+		go func(l net.Listener) {
+			defer ml.wg.Done()
+			for {
+				// Accept() is blocking, unless ml.Close() is called, in which
+				// case it will return immediately with an error.
+				conn, err := l.Accept()
+				// This assumes that ANY error from Accept() will terminate the
+				// sub-listener. We could maybe be more precise, but it
+				// doesn't seem necessary.
+				terminate := err != nil
+
+				select {
+				case ml.connCh <- connErrPair{conn: conn, err: err}:
+				case <-ml.stopCh:
+					// In case we accepted a connection AND were stopped, and
+					// this select-case was chosen, just throw away the
+					// connection. This avoids potentially blocking on connCh
+					// or leaking a connection.
+					if conn != nil {
+						_ = conn.Close()
+					}
+					terminate = true
+				}
+				// Make sure we don't loop on Accept() returning an error and
+				// the select choosing the channel case.
+				if terminate {
+					return
+				}
+			}
+		}(l)
+	}
+	return ml, nil
+}
+
+// Accept implements net.Listener. It waits for and returns a connection from
+// any of the sub-listener.
+func (ml *multiListener) Accept() (net.Conn, error) {
+	// wait for any sub-listener to enqueue an accepted connection
+	connErr, ok := <-ml.connCh
+	if !ok {
+		// The channel will be closed only when Close() is called on the
+		// multiListener. Closing of this channel implies that all
+		// sub-listeners are also closed, which causes a "use of closed
+		// network connection" error on their Accept() calls. We return the
+		// same error for multiListener.Accept() if multiListener.Close()
+		// has already been called.
+		return nil, fmt.Errorf("use of closed network connection")
+	}
+	return connErr.conn, connErr.err
+}
+
+// Close implements net.Listener. It will close all sub-listeners and wait for
+// the go-routines to exit.
+func (ml *multiListener) Close() error {
+	// Make sure this can be called repeatedly without explosions.
+	select {
+	case <-ml.stopCh:
+		return fmt.Errorf("use of closed network connection")
+	default:
+	}
+
+	// Tell all sub-listeners to stop.
+	close(ml.stopCh)
+
+	// Closing the listeners causes Accept() to immediately return an error in
+	// the sub-listener go-routines.
+	for _, l := range ml.listeners {
+		_ = l.Close()
+	}
+
+	// Wait for all the sub-listener go-routines to exit.
+	ml.wg.Wait()
+	close(ml.connCh)
+
+	// Drain any already-queued connections.
+	for connErr := range ml.connCh {
+		if connErr.conn != nil {
+			_ = connErr.conn.Close()
+		}
+	}
+	return nil
+}
+
+// Addr is an implementation of the net.Listener interface. It always returns
+// the address of the first listener. Callers should use conn.LocalAddr() to
+// obtain the actual local address of the sub-listener.
+func (ml *multiListener) Addr() net.Addr {
+	return ml.listeners[0].Addr()
+}
+
+// Addrs is like Addr, but returns the address for all registered listeners.
+func (ml *multiListener) Addrs() []net.Addr {
+	var ret []net.Addr
+	for _, l := range ml.listeners {
+		ret = append(ret, l.Addr())
+	}
+	return ret
+}
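The vendored file above introduces a new public helper, MultiListen. A usage sketch (not part of this commit; the loopback addresses and accept-loop handling are placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	netutils "k8s.io/utils/net"
)

func main() {
	// One logical listener over an IPv4 and an IPv6 loopback address.
	ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:9090", "[::1]:9090")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	for {
		// Accept returns a connection from whichever sub-listener fired;
		// after Close it fails with "use of closed network connection".
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("accepted from", conn.RemoteAddr())
		_ = conn.Close()
	}
}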
vendor/k8s.io/utils/trace/trace.go (2 lines changed, generated, vendored)
@@ -192,7 +192,7 @@ func (t *Trace) Log() {
 	t.endTime = &endTime
 	t.lock.Unlock()
 	// an explicit logging request should dump all the steps out at the higher level
-	if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace
+	if t.parentTrace == nil && klogV(2) { // We don't start logging until Log or LogIfLong is called on the root trace
 		t.logTrace()
 	}
 }
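The trace.go change gates an explicit Log() on a root trace behind klog verbosity 2. klogV there is an internal helper of the trace package; a rough equivalent using the public klog API (a sketch, not the vendored code):

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "2") // same effect as running with -v=2
	flag.Parse()

	// The kind of check trace.go now performs before dumping all steps.
	if klog.V(2).Enabled() {
		klog.Info("root trace output is emitted only at verbosity >= 2")
	}
}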
vendor/modules.txt (2 lines changed, vendored)
@@ -1232,7 +1232,7 @@ k8s.io/kube-openapi/pkg/validation/errors
 k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
-# k8s.io/utils v0.0.0-20240310230437-4693a0247e57
+# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
 ## explicit; go 1.18
 k8s.io/utils/buffer
 k8s.io/utils/clock