diff --git a/cmd/descheduler/app/options/options.go b/cmd/descheduler/app/options/options.go
index 7adefe2ea..d398592e0 100644
--- a/cmd/descheduler/app/options/options.go
+++ b/cmd/descheduler/app/options/options.go
@@ -48,7 +48,7 @@ func NewDeschedulerServer() *DeschedulerServer {
 
 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
-	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "time interval between two consecutive descheduler executions")
+	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
 	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
 	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go
index 3bcc4da5f..ee55d855a 100644
--- a/pkg/descheduler/descheduler.go
+++ b/pkg/descheduler/descheduler.go
@@ -21,7 +21,9 @@ import (
 
 	"k8s.io/klog"
 
+	"k8s.io/apimachinery/pkg/util/wait"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/client"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -29,7 +31,6 @@ import (
 )
 
 func Run(rs *options.DeschedulerServer) error {
-
 	rsclient, err := client.CreateClient(rs.KubeconfigFile)
 	if err != nil {
 		return err
@@ -44,6 +45,10 @@ func Run(rs *options.DeschedulerServer) error {
 		return fmt.Errorf("deschedulerPolicy is nil")
 	}
+	return RunDeschedulerStrategies(rs, deschedulerPolicy)
+}
+
+func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy) error {
 	evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
 	if err != nil || len(evictionPolicyGroupVersion) == 0 {
 		return err
 	}
@@ -61,10 +66,18 @@ func Run(rs *options.DeschedulerServer) error {
 	}
 	nodePodCount := strategies.InitializeNodePodCount(nodes)
-	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
-
+	stopChannel := make(chan struct{})
+	wait.Until(func() {
+		strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
+
+		// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
+		if rs.DeschedulingInterval.Seconds() == 0 {
+			close(stopChannel)
+		}
+	}, rs.DeschedulingInterval, stopChannel)
+
 	return nil
 }
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index edec82e11..2bf8106f0 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -28,7 +28,9 @@ import (
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
+	"sigs.k8s.io/descheduler/pkg/api"
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/descheduler"
 	"sigs.k8s.io/descheduler/pkg/descheduler/client"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -157,6 +159,35 @@ func TestE2E(t *testing.T) {
 	evictPods(t, clientSet, nodeList, rc)
 }
 
+func TestDeschedulingInterval(t *testing.T) {
+	clientSet, err := client.CreateClient("/tmp/admin.conf")
+	if err != nil {
+		t.Errorf("Error during client creation with %v", err)
+	}
+
+	// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
+	s := options.NewDeschedulerServer()
+	s.Client = clientSet
+
+	deschedulerPolicy := &api.DeschedulerPolicy{}
+
+	c := make(chan bool)
+	go func() {
+		err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy)
+		if err != nil {
+			t.Errorf("Error running descheduler strategies: %+v", err)
+		}
+		c <- true
+	}()
+
+	select {
+	case <-c:
+		// successfully returned
+	case <-time.After(3 * time.Minute):
+		t.Errorf("descheduler.RunDeschedulerStrategies timed out even without descheduling-interval set")
+	}
+}
+
 func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeList, rc *v1.ReplicationController) {
 	var leastLoadedNode v1.Node
 	podsBefore := math.MaxInt16
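
Reviewer sketch (illustrative, not part of the patch): the run-once behavior above relies on wait.Until returning once its stop channel is closed from inside the callback. A minimal standalone demonstration of that mechanism, assuming only k8s.io/apimachinery; the names stop and interval and the printed strings are hypothetical, not taken from the patch:

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		stop := make(chan struct{})
		interval := 0 * time.Second // stands in for an unset --descheduling-interval

		wait.Until(func() {
			fmt.Println("descheduling pass")
			// Closing the stop channel inside the callback ends the loop after
			// the current iteration, so a zero interval yields exactly one pass.
			if interval.Seconds() == 0 {
				close(stop)
			}
		}, interval, stop)

		fmt.Println("wait.Until returned") // reached after a single pass
	}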