Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Merge pull request #969 from Dentrax/gofumpt
run: `gofumpt -w -extra .`
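Not part of the commit message: the snippet below is an illustrative Go sketch (hypothetical names, not code from the repository) of the kinds of rewrites that running `gofumpt -w -extra .` produced in this diff — grouped adjacent declarations, short `:=` assignments instead of `var x = ...`, collapsed repeated parameter types, and a space after `//` in comments.

```go
// Illustrative only; not code from the repository.
package example

// Adjacent single-line const declarations are grouped into one block
// (previously two separate `const GroupName = ...` / `const GroupVersion = ...` lines).
const (
	GroupName    = "descheduler"
	GroupVersion = "v1alpha1"
)

// Repeated parameter types are collapsed: `cpu int64, memory int64` becomes `cpu, memory int64`.
func sum(cpu, memory int64) int64 {
	// `var total = cpu + memory` is rewritten to a short variable declaration.
	total := cpu + memory
	// A space is enforced after the comment marker: `//like this` becomes `// like this`.
	return total
}
```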
@@ -5,11 +5,14 @@ linters:
 disable-all: true
 enable:
 - gofmt
+- gofumpt
 - gosimple
 - gocyclo
 - misspell
 - govet
 
 linters-settings:
+gofumpt:
+extra-rules: true
 goimports:
 local-prefixes: sigs.k8s.io/descheduler
Makefile (+9)
@@ -29,6 +29,9 @@ LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}
 GOLANGCI_VERSION := v1.49.0
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
 
+GOFUMPT_VERSION := v0.4.0
+HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
+
 # REGISTRY is the container registry to push
 # into. The default is to push to the staging
 # registry, not production.
@@ -137,6 +140,12 @@ ifndef HAS_GOLANGCI
 endif
 ./_output/bin/golangci-lint run
 
+fmt:
+ifndef HAS_GOFUMPT
+go install mvdan.cc/gofumpt@${GOFUMPT_VERSION}
+endif
+gofumpt -w -extra .
+
 # helm
 
 ensure-helm-install:
@@ -43,7 +43,6 @@ import (
 // NewDeschedulerCommand creates a *cobra.Command object with default parameters
 func NewDeschedulerCommand(out io.Writer) *cobra.Command {
 s, err := options.NewDeschedulerServer()
-
 if err != nil {
 klog.ErrorS(err, "unable to initialize server")
 }
@@ -24,7 +24,7 @@ import (
 )
 
 func NewVersionCommand() *cobra.Command {
-var versionCmd = &cobra.Command{
+versionCmd := &cobra.Command{
 Use: "version",
 Short: "Version of descheduler",
 Long: `Prints the version of descheduler.`,
@@ -39,6 +39,13 @@ make test-unit
 make test-e2e
 ```
 
+## Format Code
+
+After making changes in the code base, ensure that the code is formatted correctly:
+
+```
+make fmt
+```
 
 ## Build Helm Package locally
 
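An aside, not part of the upstream patch: the contributor guide section above tells contributors to run `make fmt`; the sketch below (hypothetical types) shows the single most frequent rewrite that pass applies in the test files that follow — moving a cuddled closing brace of a multi-line composite literal onto its own line.

```go
package example

type sizeLimit struct{ Limit int64 }

type emptyDir struct{ SizeLimit sizeLimit }

// Before `make fmt`: the outer literal's closing brace is cuddled onto the
// same line as the nested literal, as in the old test fixtures.
var before = emptyDir{
	SizeLimit: sizeLimit{Limit: 10}}

// After `make fmt`: the closing brace gets its own line and the last field
// keeps a trailing comma.
var after = emptyDir{
	SizeLimit: sizeLimit{Limit: 10},
}
```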
@@ -51,8 +51,10 @@ type DeschedulerPolicy struct {
 MaxNoOfPodsToEvictPerNamespace *uint
 }
 
-type StrategyName string
-type StrategyList map[StrategyName]DeschedulerStrategy
+type (
+StrategyName string
+StrategyList map[StrategyName]DeschedulerStrategy
+)
 
 type DeschedulerStrategy struct {
 // Enabled or disabled
@@ -93,8 +95,10 @@ type StrategyParameters struct {
 ExcludedTaints []string
 }
 
-type Percentage float64
-type ResourceThresholds map[v1.ResourceName]Percentage
+type (
+Percentage float64
+ResourceThresholds map[v1.ResourceName]Percentage
+)
 
 type NodeResourceUtilizationThresholds struct {
 UseDeviationThresholds bool
@@ -28,8 +28,10 @@ var (
 )
 
 // GroupName is the group name used in this package
-const GroupName = "descheduler"
-const GroupVersion = "v1alpha1"
+const (
+GroupName = "descheduler"
+GroupVersion = "v1alpha1"
+)
 
 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
@@ -51,8 +51,10 @@ type DeschedulerPolicy struct {
 MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
 }
 
-type StrategyName string
-type StrategyList map[StrategyName]DeschedulerStrategy
+type (
+StrategyName string
+StrategyList map[StrategyName]DeschedulerStrategy
+)
 
 type DeschedulerStrategy struct {
 // Enabled or disabled
@@ -90,8 +92,10 @@ type StrategyParameters struct {
 ExcludedTaints []string `json:"excludedTaints,omitempty"`
 }
 
-type Percentage float64
-type ResourceThresholds map[v1.ResourceName]Percentage
+type (
+Percentage float64
+ResourceThresholds map[v1.ResourceName]Percentage
+)
 
 type NodeResourceUtilizationThresholds struct {
 UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
@@ -28,8 +28,10 @@ var (
 )
 
 // GroupName is the group name use in this package
-const GroupName = "deschedulercomponentconfig"
-const GroupVersion = "v1alpha1"
+const (
+GroupName = "deschedulercomponentconfig"
+GroupVersion = "v1alpha1"
+)
 
 // SchemeGroupVersion is group version used to register these objects
 var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
@@ -26,7 +26,7 @@ import (
 "k8s.io/client-go/tools/clientcmd"
 )
 
-func CreateClient(kubeconfig string, userAgt string) (clientset.Interface, error) {
+func CreateClient(kubeconfig, userAgt string) (clientset.Interface, error) {
 var cfg *rest.Config
 if len(kubeconfig) != 0 {
 master, err := GetMasterFromKubeconfig(kubeconfig)
@@ -33,8 +33,10 @@ import (
 )
 
 // nodePodEvictedCount keeps count of pods evicted on node
-type nodePodEvictedCount map[string]uint
-type namespacePodEvictCount map[string]uint
+type (
+nodePodEvictedCount map[string]uint
+namespacePodEvictCount map[string]uint
+)
 
 type PodEvictor struct {
 client clientset.Interface
@@ -59,8 +61,8 @@ func NewPodEvictor(
 metricsEnabled bool,
 eventRecorder events.EventRecorder,
 ) *PodEvictor {
-var nodePodCount = make(nodePodEvictedCount)
-var namespacePodCount = make(namespacePodEvictCount)
+nodePodCount := make(nodePodEvictedCount)
+namespacePodCount := make(namespacePodEvictCount)
 for _, node := range nodes {
 // Initialize podsEvicted till now with 0.
 nodePodCount[node.Name] = 0
@@ -81,7 +81,7 @@ func TestPodTypes(t *testing.T) {
 p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
 // The following 4 pods won't get evicted.
 // A daemonset.
-//p2.Annotations = test.GetDaemonSetAnnotation()
+// p2.Annotations = test.GetDaemonSetAnnotation()
 p2.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
 // A pod with local storage.
 p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -91,7 +91,8 @@ func TestPodTypes(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -111,5 +112,4 @@ func TestPodTypes(t *testing.T) {
 if utils.IsDaemonsetPod(ownerRefList) || utils.IsPodWithLocalStorage(p1) || utils.IsCriticalPriorityPod(p1) || utils.IsMirrorPod(p1) || utils.IsStaticPod(p1) {
 t.Errorf("Expected p1 to be a normal pod.")
 }
-
 }
@@ -19,13 +19,14 @@ package descheduler
 import (
 "context"
 "fmt"
+"os"
+
 "k8s.io/apimachinery/pkg/util/uuid"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/client-go/tools/leaderelection"
 "k8s.io/client-go/tools/leaderelection/resourcelock"
 componentbaseconfig "k8s.io/component-base/config"
 "k8s.io/klog/v2"
-"os"
 )
 
 // NewLeaderElection starts the leader election code loop
@@ -56,7 +56,6 @@ func TestReadyNodes(t *testing.T) {
 if IsReady(node5) {
 t.Errorf("Expected %v to be not ready", node5.Name)
 }
-
 }
 
 func TestReadyNodesWithNodeSelector(t *testing.T) {
@@ -111,11 +110,9 @@ func TestIsNodeUnschedulable(t *testing.T) {
 t.Errorf("Test %#v failed", test.description)
 }
 }
-
 }
 
 func TestPodFitsCurrentNode(t *testing.T) {
-
 nodeLabelKey := "kubernetes.io/desiredNode"
 nodeLabelValue := "yes"
 
@@ -756,7 +753,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
 }
 
 // createResourceList builds a small resource list of core resources
-func createResourceList(cpu int64, memory int64, ephemeralStorage int64) v1.ResourceList {
+func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
 resourceList := make(map[v1.ResourceName]resource.Quantity)
 resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
 resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
@@ -10,9 +10,7 @@ import (
 "sigs.k8s.io/descheduler/pkg/api"
 )
 
-var (
-thresholdPriority int32 = 1000
-)
+var thresholdPriority int32 = 1000
 
 func TestValidStrategyParams(t *testing.T) {
 ctx := context.Background()
@@ -25,24 +25,31 @@ var _ framework.Handle = &HandleImpl{}
 func (hi *HandleImpl) ClientSet() clientset.Interface {
 return hi.ClientsetImpl
 }
+
 func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
 return hi.GetPodsAssignedToNodeFuncImpl
 }
+
 func (hi *HandleImpl) SharedInformerFactory() informers.SharedInformerFactory {
 return hi.SharedInformerFactoryImpl
 }
+
 func (hi *HandleImpl) Evictor() framework.Evictor {
 return hi
 }
+
 func (hi *HandleImpl) Filter(pod *v1.Pod) bool {
 return hi.EvictorFilterImpl.Filter(pod)
 }
+
 func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
 return hi.EvictorFilterImpl.PreEvictionFilter(pod)
 }
+
 func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
 return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
 }
+
 func (hi *HandleImpl) NodeLimitExceeded(node *v1.Node) bool {
 return hi.PodEvictorImpl.NodeLimitExceeded(node)
 }
@@ -300,7 +300,6 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
 }
 
 for _, test := range testCases {
-
 t.Run(test.description, func(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
@@ -474,7 +473,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -494,7 +494,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -515,7 +516,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -706,7 +708,6 @@ func TestDefaultEvictorFilter(t *testing.T) {
 }
 
 for _, test := range testCases {
-
 t.Run(test.description, func(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
@@ -758,7 +759,6 @@ func TestDefaultEvictorFilter(t *testing.T) {
 if (result) != test.result {
 t.Errorf("Filter should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
 }
-
 })
 }
 }
@@ -14,12 +14,13 @@ limitations under the License.
 package defaultevictor
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/utils/pointer"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
@@ -14,12 +14,13 @@ limitations under the License.
 package nodeutilization
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_LowNodeUtilizationArgs(t *testing.T) {
@@ -105,7 +105,8 @@ func TestHighNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -510,7 +511,6 @@ func TestHighNodeUtilization(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -662,7 +662,6 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -90,7 +90,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -139,7 +140,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -198,7 +200,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -258,7 +261,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -312,7 +316,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -381,7 +386,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -447,7 +453,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -521,7 +528,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -602,7 +610,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -665,7 +674,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -756,7 +766,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -806,7 +817,8 @@ func TestLowNodeUtilization(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -901,7 +913,6 @@ func TestLowNodeUtilization(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -915,7 +926,6 @@ func TestLowNodeUtilization(t *testing.T) {
 }
 
 plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
-
 Thresholds: test.thresholds,
 TargetThresholds: test.targetThresholds,
 UseDeviationThresholds: test.useDeviationThresholds,
@@ -1075,7 +1085,6 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -1089,7 +1098,6 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
 }
 
 plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
-
 Thresholds: api.ResourceThresholds{
 v1.ResourcePods: 20,
 },
@@ -227,7 +227,7 @@ func evictPodsFromSourceNodes(
 v1.ResourceMemory: {},
 }
 
-var taintsOfDestinationNodes = make(map[string][]v1.Taint, len(destinationNodes))
+taintsOfDestinationNodes := make(map[string][]v1.Taint, len(destinationNodes))
 for _, node := range destinationNodes {
 taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints
 
@@ -282,7 +282,6 @@ func evictPods(
 podEvictor framework.Evictor,
 continueEviction continueEvictionCond,
 ) {
-
 var excludedNamespaces sets.String
 if evictableNamespaces != nil {
 excludedNamespaces = sets.NewString(evictableNamespaces.Exclude...)
@@ -438,7 +437,6 @@ func averageNodeBasicresources(nodes []*v1.Node, getPodsAssignedToNode podutil.G
 total[resource] += api.Percentage(value.MilliValue()) / api.Percentage(nodeCapacityValue.MilliValue()) * 100.0
 } else {
 total[resource] += api.Percentage(value.Value()) / api.Percentage(nodeCapacityValue.Value()) * 100.0
-
 }
 }
 }
@@ -15,6 +15,7 @@ package nodeutilization
 
 import (
 "fmt"
+
 "sigs.k8s.io/descheduler/pkg/api"
 )
 
@@ -18,13 +18,14 @@ package nodeutilization
 
 import (
 "fmt"
+"testing"
+
 v1 "k8s.io/api/core/v1"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
-var extendedResource = v1.ResourceName("example.com/foo")
+extendedResource := v1.ResourceName("example.com/foo")
 tests := []struct {
 name string
 thresholds api.ResourceThresholds
@@ -165,7 +166,6 @@ func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
 
 for _, testCase := range tests {
 args := &LowNodeUtilizationArgs{
-
 Thresholds: testCase.thresholds,
 TargetThresholds: testCase.targetThresholds,
 }
@@ -14,13 +14,14 @@ limitations under the License.
 package podlifetime
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/utils/pointer"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
@@ -371,7 +371,6 @@ func TestPodLifeTime(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -18,6 +18,7 @@ package podlifetime
 
 import (
 "fmt"
+
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -17,8 +17,9 @@ limitations under the License.
 package podlifetime
 
 import (
-v1 "k8s.io/api/core/v1"
 "testing"
+
+v1 "k8s.io/api/core/v1"
 )
 
 func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
@@ -14,11 +14,12 @@ limitations under the License.
 package removeduplicates
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemoveDuplicatesArgs(t *testing.T) {
@@ -142,7 +142,8 @@ func TestFindDuplicatePods(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -366,7 +367,6 @@ func TestFindDuplicatePods(t *testing.T) {
 }
 })
 }
-
 }
 
 func TestRemoveDuplicatesUniformly(t *testing.T) {
@@ -788,7 +788,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -14,13 +14,14 @@ limitations under the License.
 package removefailedpods
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "k8s.io/utils/pointer"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemoveFailedPodsArgs(t *testing.T) {
@@ -36,15 +36,14 @@ import (
 "sigs.k8s.io/descheduler/test"
 )
 
-var (
-OneHourInSeconds uint = 3600
-)
+var OneHourInSeconds uint = 3600
 
 func TestRemoveFailedPods(t *testing.T) {
 createRemoveFailedPodsArgs := func(
 includingInitContainers bool,
 reasons, excludeKinds []string,
-minAgeSeconds *uint) RemoveFailedPodsArgs {
+minAgeSeconds *uint,
+) RemoveFailedPodsArgs {
 return RemoveFailedPodsArgs{
 IncludingInitContainers: includingInitContainers,
 Reasons: reasons,
@@ -317,7 +316,6 @@ func TestRemoveFailedPods(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -14,12 +14,13 @@ limitations under the License.
 package removepodshavingtoomanyrestarts
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemovePodsHavingTooManyRestartsArgs(t *testing.T) {
@@ -74,7 +74,8 @@ func initPods(node *v1.Node) []*v1.Pod {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -225,7 +226,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 
 for _, tc := range tests {
 t.Run(tc.description, func(t *testing.T) {
-
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 
@@ -278,7 +278,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -15,6 +15,7 @@ package removepodshavingtoomanyrestarts
 
 import (
 "fmt"
+
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -14,12 +14,13 @@ limitations under the License.
 package removepodsviolatinginterpodantiaffinity
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
@@ -190,7 +190,6 @@ func TestPodAntiAffinity(t *testing.T) {
 
 for _, test := range tests {
 t.Run(test.description, func(t *testing.T) {
-
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 
@@ -243,7 +242,6 @@ func TestPodAntiAffinity(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -15,6 +15,7 @@ package removepodsviolatinginterpodantiaffinity
 
 import (
 "fmt"
+
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -14,12 +14,13 @@ limitations under the License.
 package removepodsviolatingnodeaffinity
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemovePodsViolatingNodeAffinityArgs(t *testing.T) {
@@ -14,13 +14,12 @@ limitations under the License.
 package removepodsviolatingnodeaffinity
 
 import (
+"context"
 "fmt"
 
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/apimachinery/pkg/util/sets"
 
-"context"
-
 v1 "k8s.io/api/core/v1"
 "k8s.io/klog/v2"
 "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -242,7 +242,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -18,6 +18,7 @@ package removepodsviolatingnodeaffinity
 
 import (
 "fmt"
+
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -14,12 +14,13 @@ limitations under the License.
 package removepodsviolatingnodetaints
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemovePodsViolatingNodeTaintsArgs(t *testing.T) {
@@ -139,7 +139,8 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 VolumeSource: v1.VolumeSource{
 HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
 EmptyDir: &v1.EmptyDirVolumeSource{
-SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
+SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
+},
 },
 },
 }
@@ -175,14 +176,13 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 includePreferNoSchedule bool
 excludedTaints []string
 }{
-
 {
 description: "Pods not tolerating node taint should be evicted",
 pods: []*v1.Pod{p1, p2, p3},
 nodes: []*v1.Node{node1},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 1, //p2 gets evicted
+expectedEvictedPodCount: 1, // p2 gets evicted
 },
 {
 description: "Pods with tolerations but not tolerating node taint should be evicted",
@@ -190,7 +190,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node1},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 1, //p4 gets evicted
+expectedEvictedPodCount: 1, // p4 gets evicted
 },
 {
 description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
@@ -199,7 +199,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
 maxPodsToEvictPerNode: &uint1,
-expectedEvictedPodCount: 1, //p5 or p6 gets evicted
+expectedEvictedPodCount: 1, // p5 or p6 gets evicted
 },
 {
 description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
@@ -208,7 +208,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
 maxNoOfPodsToEvictPerNamespace: &uint1,
-expectedEvictedPodCount: 1, //p5 or p6 gets evicted
+expectedEvictedPodCount: 1, // p5 or p6 gets evicted
 },
 {
 description: "Critical pods not tolerating node taint should not be evicted",
@@ -216,7 +216,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node2},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 0, //nothing is evicted
+expectedEvictedPodCount: 0, // nothing is evicted
 },
 {
 description: "Critical pods except storage pods not tolerating node taint should not be evicted",
@@ -224,7 +224,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node2},
 evictLocalStoragePods: true,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 1, //p9 gets evicted
+expectedEvictedPodCount: 1, // p9 gets evicted
 },
 {
 description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
@@ -232,7 +232,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node2},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 1, //p11 gets evicted
+expectedEvictedPodCount: 1, // p11 gets evicted
 },
 {
 description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
@@ -240,7 +240,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node1, node2},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: true,
-expectedEvictedPodCount: 2, //p2 and p7 are evicted
+expectedEvictedPodCount: 2, // p2 and p7 are evicted
 },
 {
 description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
@@ -248,7 +248,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node1, node2},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 0, //p2 gets evicted
+expectedEvictedPodCount: 0, // p2 gets evicted
 nodeFit: true,
 },
 {
@@ -257,7 +257,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node1, node3},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 0, //p2 gets evicted
+expectedEvictedPodCount: 0, // p2 gets evicted
 nodeFit: true,
 },
 {
@@ -266,7 +266,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node1, node4},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: false,
-expectedEvictedPodCount: 0, //p2 gets evicted
+expectedEvictedPodCount: 0, // p2 gets evicted
 nodeFit: true,
 },
 {
@@ -319,14 +319,13 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 nodes: []*v1.Node{node1, node6},
 evictLocalStoragePods: false,
 evictSystemCriticalPods: true,
-expectedEvictedPodCount: 0, //p2 and p7 can't be evicted
+expectedEvictedPodCount: 0, // p2 and p7 can't be evicted
 nodeFit: true,
 },
 }
 
 for _, tc := range tests {
 t.Run(tc.description, func(t *testing.T) {
-
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 
@@ -379,7 +378,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -412,7 +410,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
 }
 
 func TestToleratesTaint(t *testing.T) {
-
 testCases := []struct {
 description string
 toleration v1.Toleration
@@ -18,6 +18,7 @@ package removepodsviolatingnodetaints
 
 import (
 "fmt"
+
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -1,9 +1,10 @@
 package removepodsviolatingnodetaints
 
 import (
+"testing"
+
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
@@ -14,12 +14,13 @@ limitations under the License.
 package removepodsviolatingtopologyspreadconstraint
 
 import (
+"testing"
+
 "github.com/google/go-cmp/cmp"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 "sigs.k8s.io/descheduler/pkg/api"
-"testing"
 )
 
 func TestSetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(t *testing.T) {
@@ -282,8 +282,8 @@ func balanceDomains(
 constraintTopologies map[topologyPair][]*v1.Pod,
 sumPods float64,
 isEvictable func(pod *v1.Pod) bool,
-nodes []*v1.Node) {
-
+nodes []*v1.Node,
+) {
 idealAvg := sumPods / float64(len(constraintTopologies))
 sortedDomains := sortDomains(constraintTopologies, isEvictable)
 
@@ -1188,7 +1188,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -18,6 +18,7 @@ package removepodsviolatingtopologyspreadconstraint
 
 import (
 "fmt"
+
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -186,7 +186,6 @@ func maxResourceList(list, new v1.ResourceList) {
 
 // PodToleratesTaints returns true if a pod tolerates one node's taints
 func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
-
 for nodeName, taintsForNode := range taintsOfNodes {
 if len(pod.Spec.Tolerations) >= len(taintsForNode) {
 
@@ -189,7 +189,6 @@ func TestRemoveDuplicates(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -92,7 +92,6 @@ func TestFailedPods(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -233,7 +233,6 @@ func runPodLifetimePlugin(
 GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -188,7 +188,6 @@ func TestTooManyRestarts(t *testing.T) {
 SharedInformerFactoryImpl: sharedInformerFactory,
 },
 )
-
 if err != nil {
 t.Fatalf("Unable to initialize the plugin: %v", err)
 }
@@ -25,7 +25,7 @@ import (
 )
 
 // BuildTestPod creates a test pod with given parameters.
-func BuildTestPod(name string, cpu int64, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
+func BuildTestPod(name string, cpu, memory int64, nodeName string, apply func(*v1.Pod)) *v1.Pod {
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Namespace: "default",
@@ -94,7 +94,7 @@ func GetDaemonSetOwnerRefList() []metav1.OwnerReference {
 }
 
 // BuildTestNode creates a node with specified capacity.
-func BuildTestNode(name string, millicpu int64, mem int64, pods int64, apply func(*v1.Node)) *v1.Node {
+func BuildTestNode(name string, millicpu, mem, pods int64, apply func(*v1.Node)) *v1.Node {
 node := &v1.Node{
 ObjectMeta: metav1.ObjectMeta{
 Name: name,