Add dry run option.
@@ -34,4 +34,7 @@ type DeschedulerConfiguration struct {
 
 	// PolicyConfigFile is the filepath to the descheduler policy configuration.
 	PolicyConfigFile string
+
+	// Dry run
+	DryRun bool
 }

@@ -33,5 +33,8 @@ type DeschedulerConfiguration struct {
 	KubeconfigFile string `json:"kubeconfigFile"`
 
 	// PolicyConfigFile is the filepath to the descheduler policy configuration.
-	PolicyConfigFile string `json:"policyConfigFile,,omitempty"`
+	PolicyConfigFile string `json:"policyConfigFile,omitempty"`
+
+	// Dry run
+	DryRun bool `json:"dryRun,omitempty"`
 }

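The DryRun field added above is only reachable once something sets it, and flag registration is not part of this diff. A minimal sketch of how the options package could expose it on the command line, assuming the usual "github.com/spf13/pflag" dependency and the existing DeschedulerServer options type, might be:

// Hypothetical wiring, not part of this commit: surface the new field as a CLI flag.
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
	fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute strategies without actually evicting any pods.")
}
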
@@ -53,8 +53,8 @@ func Run(rs *options.DeschedulerServer) error {
 		return err
 	}
 
-	strategies.RemoveDuplicatePods(rs.Client, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
-	strategies.LowNodeUtilization(rs.Client, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
+	strategies.RemoveDuplicatePods(rs, deschedulerPolicy.Strategies["RemoveDuplicates"], evictionPolicyGroupVersion, nodes)
+	strategies.LowNodeUtilization(rs, deschedulerPolicy.Strategies["LowNodeUtilization"], evictionPolicyGroupVersion, nodes)
 
 	return nil
 }

@@ -28,7 +28,10 @@ import (
 	eutils "github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions/utils"
 )
 
-func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string) (bool, error) {
+func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) (bool, error) {
+	if dryRun {
+		return true, nil
+	}
 	deleteOptions := &metav1.DeleteOptions{}
 	// GracePeriodSeconds ?
 	eviction := &policy.Eviction{

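The new dryRun parameter short-circuits EvictPod before any eviction request is built, so a dry run reports success without contacting the API server. A hedged sketch of a test for that behaviour, in the style of the existing TestEvictPod (the inline pod construction and wildcard reactor are illustrative, not part of this commit), could look like:

// Hypothetical companion test: the catch-all reactor fails the test if any
// request reaches the fake client, so it only passes when the dryRun branch
// returns early.
func TestEvictPodDryRun(t *testing.T) {
	p1 := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		t.Errorf("unexpected API call %v in dry-run mode", action)
		return true, nil, nil
	})
	evicted, err := EvictPod(fakeClient, p1, "v1", true)
	if err != nil || !evicted {
		t.Errorf("expected dry run to report %v as evicted without error", p1.Name)
	}
}
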
@@ -32,7 +32,7 @@ func TestEvictPod(t *testing.T) {
 	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		return true, &v1.PodList{Items: []v1.Pod{*p1}}, nil
 	})
-	evicted, _ := EvictPod(fakeClient, p1, "v1")
+	evicted, _ := EvictPod(fakeClient, p1, "v1", false)
 	if !evicted {
 		t.Errorf("Expected %v pod to be evicted", p1.Name)
 	}

@@ -24,6 +24,7 @@ import (
 	//TODO: Change to client-go instead of generated clientset.
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 
+	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
 	"github.com/kubernetes-incubator/descheduler/pkg/api"
 	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
 	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"

@@ -35,15 +36,15 @@ type DuplicatePodsMap map[string][]*v1.Pod
 // RemoveDuplicatePods removes the duplicate pods on node. This strategy evicts all duplicate pods on node.
 // A pod is said to be a duplicate of other if both of them are from same creator, kind and are within the same
 // namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
-func RemoveDuplicatePods(client clientset.Interface, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
+func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
 	if !strategy.Enabled {
 		return
 	}
-	deleteDuplicatePods(client, policyGroupVersion, nodes)
+	deleteDuplicatePods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
 }
 
 // deleteDuplicatePods evicts the pod from node and returns the count of evicted pods.
-func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node) int {
+func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
 	podsEvicted := 0
 	for _, node := range nodes {
 		fmt.Printf("\nProcessing node: %#v\n", node.Name)

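The two signature changes above set the pattern this commit applies to both strategies: the exported entry point takes the whole *options.DeschedulerServer so it can read the client and the DryRun flag, while the unexported worker receives a plain dryRun bool. A sketch of a further strategy following that pattern (illustrative only; the pod-listing helper is hypothetical, not an existing API) might look like:

// Illustrative skeleton only, not part of this commit.
func RemoveSomethingPods(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
	if !strategy.Enabled {
		return
	}
	removeSomethingPods(ds.Client, policyGroupVersion, nodes, ds.DryRun)
}

func removeSomethingPods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
	podsEvicted := 0
	for _, node := range nodes {
		pods, err := listPodsOnNode(client, node) // hypothetical helper; substitute the real pod lister
		if err != nil {
			continue
		}
		for _, pod := range pods {
			// EvictPod reports success without calling the API server when dryRun is true.
			if success, _ := evictions.EvictPod(client, pod, policyGroupVersion, dryRun); success {
				podsEvicted++
			}
		}
	}
	return podsEvicted
}
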
@@ -54,7 +55,7 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
 			// i = 0 does not evict the first pod
 			for i := 1; i < len(pods); i++ {
 				//fmt.Printf("Removing duplicate pod %#v\n", k.Name)
-				success, err := evictions.EvictPod(client, pods[i], policyGroupVersion)
+				success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
 				if !success {
 					//TODO: change fmt.Printf as glogs.
 					fmt.Printf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)

@@ -71,7 +71,7 @@ func TestFindDuplicatePods(t *testing.T) {
 	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
 		return true, node, nil
 	})
-	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node})
+	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false)
 	if podsEvicted != expectedEvictedPodCount {
 		t.Errorf("Unexpected no of pods evicted")
 	}

@@ -25,6 +25,7 @@ import (
 	helper "k8s.io/kubernetes/pkg/api/v1/resource"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 
+	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
 	"github.com/kubernetes-incubator/descheduler/pkg/api"
 	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/evictions"
 	podutil "github.com/kubernetes-incubator/descheduler/pkg/descheduler/pod"

@@ -39,7 +40,7 @@ type NodeUsageMap struct {
 }
 type NodePodsMap map[*v1.Node][]*v1.Pod
 
-func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
+func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerStrategy, evictionPolicyGroupVersion string, nodes []*v1.Node) {
 	if !strategy.Enabled {
 		return
 	}

@@ -55,7 +56,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra
 		return
 	}
 
-	npm := CreateNodePodsMap(client, nodes)
+	npm := CreateNodePodsMap(ds.Client, nodes)
 	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
 
 	if len(lowNodes) == 0 {

@@ -71,7 +72,7 @@ func LowNodeUtilization(client clientset.Interface, strategy api.DeschedulerStra
 		fmt.Printf("no node is above target utilization\n")
 		return
 	}
-	evictPodsFromTargetNodes(client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds)
+	evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
 }
 
 func validateThresholds(thresholds api.ResourceThresholds) bool {

@@ -123,7 +124,7 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
 	return lowNodes, targetNodes, otherNodes
 }
 
-func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes []NodeUsageMap, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds) int {
+func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVersion string, targetNodes []NodeUsageMap, lowNodes []NodeUsageMap, targetThresholds api.ResourceThresholds, dryRun bool) int {
 	podsEvicted := 0
 
 	SortNodesByUsage(targetNodes)

@@ -149,7 +150,7 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
 		onePodPercentage := api.Percentage((float64(1) * 100) / float64(nodeCapcity.Pods().Value()))
 		if nodePodsUsage > targetThresholds[v1.ResourcePods] && totalPods > 0 {
 			for _, pod := range node.bePods {
-				success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion)
+				success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
 				if !success {
 					fmt.Printf("Error when evicting pod: %#v (%#v)\n", pod.Name, err)
 				} else {

@@ -164,7 +165,7 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
 		}
 		if nodePodsUsage > targetThresholds[v1.ResourcePods] && totalPods > 0 {
 			for _, pod := range node.otherPods {
-				success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion)
+				success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
 				if !success {
 					fmt.Printf("Error when evicting pod: %#v (%#v)\n", pod.Name, err)
 				} else {

@@ -103,7 +103,7 @@ func TestLowNodeUtilization(t *testing.T) {
 	expectedPodsEvicted := 4
 	npm := CreateNodePodsMap(fakeClient, []*v1.Node{n1, n2})
 	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)
-	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds)
+	podsEvicted := evictPodsFromTargetNodes(fakeClient, "v1", targetNodes, lowNodes, targetThresholds, false)
 	if expectedPodsEvicted != podsEvicted {
 		t.Errorf("Expected %#v pods to be evicted but %#v got evicted", expectedPodsEvicted)
 	}