Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-25 20:59:28 +01:00
Merge pull request #221 from damemi/descheduling-interval
Wire --descheduling-interval to run descheduler on a loop
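
Setting --descheduling-interval to a non-zero duration makes the descheduler re-run its strategies in a continuous loop at that interval; leaving it at the default of 0 preserves the previous run-once-and-exit behavior.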
cmd/descheduler/app/options/options.go

@@ -48,7 +48,7 @@ func NewDeschedulerServer() *DeschedulerServer {
 
 // AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
 func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
-	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "time interval between two consecutive descheduler executions")
+	fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
 	fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
 	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
 	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
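Note: pflag's DurationVar accepts Go duration strings such as "30s" or "5m", and the flag's zero value keeps the old run-once behavior. A minimal, self-contained sketch of how the new flag parses (the flag-set name and sample value below are illustrative, not part of this PR):

	package main

	import (
		"fmt"
		"time"

		"github.com/spf13/pflag"
	)

	func main() {
		var interval time.Duration
		fs := pflag.NewFlagSet("descheduler", pflag.ExitOnError)
		fs.DurationVar(&interval, "descheduling-interval", 0, "time between runs")
		fs.Parse([]string{"--descheduling-interval=5m"})
		fmt.Println(interval) // prints "5m0s"; the zero default means run once and exit
	}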
pkg/descheduler/descheduler.go

@@ -21,7 +21,9 @@ import (
 	"k8s.io/klog"
 
+	"k8s.io/apimachinery/pkg/util/wait"
+
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
 	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/pkg/descheduler/client"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -29,7 +31,6 @@ import (
 )
 
 func Run(rs *options.DeschedulerServer) error {
-
 	rsclient, err := client.CreateClient(rs.KubeconfigFile)
 	if err != nil {
 		return err
@@ -44,6 +45,10 @@ func Run(rs *options.DeschedulerServer) error {
 		return fmt.Errorf("deschedulerPolicy is nil")
 	}
 
+	return RunDeschedulerStrategies(rs, deschedulerPolicy)
+}
+
+func RunDeschedulerStrategies(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy) error {
 	evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
 	if err != nil || len(evictionPolicyGroupVersion) == 0 {
 		return err
@@ -61,10 +66,18 @@ func Run(rs *options.DeschedulerServer) error {
 	}
 
 	nodePodCount := strategies.InitializeNodePodCount(nodes)
-	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
-	strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
+	wait.Until(func() {
+		strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.RemovePodsViolatingInterPodAntiAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingInterPodAntiAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.RemovePodsViolatingNodeAffinity(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeAffinity"], evictionPolicyGroupVersion, nodes, nodePodCount)
+		strategies.RemovePodsViolatingNodeTaints(rs, deschedulerPolicy.Strategies["RemovePodsViolatingNodeTaints"], evictionPolicyGroupVersion, nodes, nodePodCount)
+
+		// If there was no interval specified, send a signal to the stopChannel to end the wait.Until loop after 1 iteration
+		if rs.DeschedulingInterval.Seconds() == 0 {
+			close(stopChannel)
+		}
+	}, rs.DeschedulingInterval, stopChannel)
+
 	return nil
 }
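Note: wait.Until (from k8s.io/apimachinery/pkg/util/wait) invokes the given function, waits for the period, and repeats until the stop channel is closed. Closing the channel inside the first iteration is what makes an unset (zero) interval behave like the old single run; with a period of 0 and no close, the function would be re-run in a tight loop. A standalone sketch of the pattern (variable names are illustrative, not from the PR):

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		interval := time.Duration(0) // as if --descheduling-interval were left unset
		stopChannel := make(chan struct{})

		wait.Until(func() {
			fmt.Println("running all strategies once")
			// Without this close, a zero interval would re-run the function immediately, forever.
			if interval.Seconds() == 0 {
				close(stopChannel)
			}
		}, interval, stopChannel)
	}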
test/e2e/e2e_test.go

@@ -28,7 +28,9 @@ import (
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
+	"sigs.k8s.io/descheduler/pkg/api"
 	deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
+	"sigs.k8s.io/descheduler/pkg/descheduler"
 	"sigs.k8s.io/descheduler/pkg/descheduler/client"
 	eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
 	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
@@ -157,6 +159,35 @@ func TestE2E(t *testing.T) {
 	evictPods(t, clientSet, nodeList, rc)
 }
 
+func TestDeschedulingInterval(t *testing.T) {
+	clientSet, err := client.CreateClient("/tmp/admin.conf")
+	if err != nil {
+		t.Errorf("Error during client creation with %v", err)
+	}
+
+	// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
+	s := options.NewDeschedulerServer()
+	s.Client = clientSet
+
+	deschedulerPolicy := &api.DeschedulerPolicy{}
+
+	c := make(chan bool)
+	go func() {
+		err := descheduler.RunDeschedulerStrategies(s, deschedulerPolicy)
+		if err != nil {
+			t.Errorf("Error running descheduler strategies: %+v", err)
+		}
+		c <- true
+	}()
+
+	select {
+	case <-c:
+		// successfully returned
+	case <-time.After(3 * time.Minute):
+		t.Errorf("descheduler.Run timed out even without descheduling-interval set")
+	}
+}
+
 func evictPods(t *testing.T, clientSet clientset.Interface, nodeList *v1.NodeList, rc *v1.ReplicationController) {
 	var leastLoadedNode v1.Node
 	podsBefore := math.MaxInt16
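Note: the test runs the strategies in a goroutine and races the completion signal against a deadline, so a regression that left the run-once path looping would fail via the time.After branch. The same guard pattern in isolation (the sleep stands in for the real descheduler call):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		done := make(chan bool)
		go func() {
			// Stand-in for descheduler.RunDeschedulerStrategies(s, deschedulerPolicy).
			time.Sleep(100 * time.Millisecond)
			done <- true
		}()

		select {
		case <-done:
			fmt.Println("returned before the deadline")
		case <-time.After(3 * time.Minute):
			fmt.Println("timed out: the run never exited")
		}
	}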