1
0
mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

Implement part of low node utilization strategy.

This commit is contained in:
Avesh Agarwal
2017-08-04 15:44:54 -04:00
parent 6178d99993
commit 0f8ce97527
3 changed files with 68 additions and 26 deletions

View File

@@ -54,31 +54,19 @@ func Run(rs *options.ReschedulerServer) error {
}
fmt.Printf("\nreschedulerPolicy: %#v\n", reschedulerPolicy)
policyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(policyGroupVersion) == 0 {
return err
}
strategies.RemoveDuplicatePods(rs.Client, policyGroupVersion)
/*stopChannel := make(chan struct{})
nodes, err := node.ReadyNodes(rs.Client, stopChannel)
if err != nil {
return err
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(client, stopChannel)
if err != nil {
return err
}
for _, n := range nodes {
fmt.Printf("\nnode = %#v\n", n)
}
for _, node := range nodes {
pods, err := pod.ListPodsOnANode(rs.Client, node)
if err != nil {
return err
}
for _, p := range pods {
fmt.Printf("\npod = %#v\n", p)
}
}*/
strategies.LowNodeUtilization(rs.Client, policyGroupVersion, nodes)
strategies.RemoveDuplicatePods(rs.Client, policyGroupVersion, nodes)
return nil
}

View File

@@ -31,12 +31,7 @@ import (
//type creator string
type DuplicatePodsMap map[string][]*v1.Pod
func RemoveDuplicatePods(client clientset.Interface, policyGroupVersion string) error {
stopChannel := make(chan struct{})
nodes, err := node.ReadyNodes(client, stopChannel)
if err != nil {
return err
}
func RemoveDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node) error {
for _, node := range nodes {
fmt.Printf("\nProcessing node: %#v\n", node.Name)
dpm := RemoveDuplicatePodsOnANode(client, node)

View File

@@ -15,3 +15,62 @@ limitations under the License.
*/
package strategies
import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

	"github.com/aveshagarwal/rescheduler/pkg/api"
	podutil "github.com/aveshagarwal/rescheduler/pkg/rescheduler/pod"
)
// LowNodeUtilization reports, for every node in nodes, the utilization
// computed by NodeUtilization. Currently it only prints the result; the
// policyGroupVersion parameter is accepted for interface parity with the
// other strategies but is not yet used here.
func LowNodeUtilization(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node) {
	for i := range nodes {
		n := nodes[i]
		usage := NodeUtilization(client, n)
		fmt.Printf("Node %#v usage: %#v\n", n.Name, usage)
	}
}
// NodeUtilization returns the percentage of the node's allocatable CPU,
// memory, and pod capacity consumed by the resource requests of the pods
// currently on it.
//
// Best-effort pods are skipped (they declare no requests). Pods whose
// requests cannot be computed are logged and ignored. Falls back to
// Status.Capacity when Status.Allocatable is unset. Returns nil if the
// node's pods cannot be listed.
func NodeUtilization(client clientset.Interface, node *v1.Node) api.ResourceThresholds {
	pods, err := podutil.ListPodsOnANode(client, node)
	if err != nil {
		return nil
	}
	totalReqs := map[v1.ResourceName]resource.Quantity{}
	// Fix: range over the pod values; the original `for pod := range pods`
	// iterated slice indices (ints), which cannot be passed to the pod helpers.
	for _, pod := range pods {
		if podutil.IsBestEffortPod(pod) {
			continue
		}
		req, _, err := v1resource.PodRequestsAndLimits(pod)
		if err != nil {
			fmt.Printf("Error computing resource usage of pod, ignoring: %#v\n", pod.Name)
			continue
		}
		for name, quantity := range req {
			if name == v1.ResourceCPU || name == v1.ResourceMemory {
				// Copy on first insert so the pod's own Quantity is never mutated.
				if value, ok := totalReqs[name]; !ok {
					totalReqs[name] = *quantity.Copy()
				} else {
					value.Add(quantity)
					totalReqs[name] = value
				}
			}
		}
	}
	// Allocatable is the schedulable budget; Capacity is only a fallback.
	allocatable := node.Status.Capacity
	if len(node.Status.Allocatable) > 0 {
		allocatable = node.Status.Allocatable
	}
	rt := api.ResourceThresholds{}
	totalCPUReq := totalReqs[v1.ResourceCPU] // fix: was undefined `totalReq`
	totalMemReq := totalReqs[v1.ResourceMemory]
	totalPods := len(pods)
	rt[v1.ResourceCPU] = (float64(totalCPUReq.MilliValue()) * 100) / float64(allocatable.Cpu().MilliValue())
	// fix: `v1.ResourceMmeory` typo — memory utilization was keyed to a
	// nonexistent resource name.
	rt[v1.ResourceMemory] = float64(totalMemReq.Value()) / float64(allocatable.Memory().Value()) * 100
	rt[v1.ResourcePods] = (float64(totalPods) * 100) / float64(allocatable.Pods().Value())
	return rt
}