Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)
Merge pull request #19 from ravisantoshgudimetla/introduce-glog
Conversion to glog
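The change throughout this pull request follows a single pattern: unconditional fmt.Printf calls are replaced with leveled glog calls, so routine progress messages only appear when the process is started with a high enough -v, while error reports stay unconditional via glog.Infof. A minimal sketch of that pattern (the message strings and the standalone main are illustrative, not taken from the repository):

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()       // glog registers -v, -logtostderr, -vmodule, ... on the standard flag set
	defer glog.Flush() // flush buffered log lines before exiting

	// Before: fmt.Printf("Processing node: %v\n", "node-1")
	// After: only printed when run with -v=1 or higher.
	glog.V(1).Infof("Processing node: %v", "node-1")

	// Errors stay unconditional.
	glog.Infof("Error when evicting pod: %v", "example-pod")
}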
@@ -19,31 +19,40 @@ package app

import (
	"fmt"
	"flag"
	"io"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app/options"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"

	"k8s.io/apiserver/pkg/util/logs"
	aflag "k8s.io/apiserver/pkg/util/flag"
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
-func NewDeschedulerCommand() *cobra.Command {
+func NewDeschedulerCommand(out io.Writer) *cobra.Command {
	s := options.NewDeschedulerServer()
	s.AddFlags(pflag.CommandLine)
	cmd := &cobra.Command{
		Use: "descheduler",
		Short: "descheduler",
		Long: `The descheduler evicts pods which may be bound to less desired nodes`,
		Run: func(cmd *cobra.Command, args []string) {
			logs.InitLogs()
			defer logs.FlushLogs()
			err := Run(s)
			if err != nil {
				fmt.Println(err)
			}
		},
	}
	cmd.SetOutput(out)

	flags := cmd.Flags()
	flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
	flags.AddGoFlagSet(flag.CommandLine)
	s.AddFlags(flags)
	return cmd
}
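With glog in the import graph, its flags (-v, -logtostderr, -vmodule, ...) live on the standard library's flag.CommandLine, so the command has to merge that Go flag set into cobra's pflag set or the verbosity options would not be accepted on the CLI. A minimal sketch of the same wiring under those assumptions; newCommand is a hypothetical name, and the aflag.WordSepNormalizeFunc normalizer from k8s.io/apiserver is left out:

package main

import (
	goflag "flag"
	"io"
	"os"

	"github.com/golang/glog"
	"github.com/spf13/cobra"
)

// newCommand wires glog's flags (registered on the standard flag set) into the
// cobra command so that -v, -logtostderr, etc. are accepted on the command line.
func newCommand(out io.Writer) *cobra.Command {
	cmd := &cobra.Command{
		Use: "example",
		Run: func(cmd *cobra.Command, args []string) {
			defer glog.Flush()
			glog.V(1).Info("only visible with -v=1 or higher")
		},
	}
	cmd.SetOutput(out)
	cmd.Flags().AddGoFlagSet(goflag.CommandLine)
	return cmd
}

func main() {
	if err := newCommand(os.Stdout).Execute(); err != nil {
		os.Exit(1)
	}
}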
@@ -19,12 +19,12 @@ package main

import (
	"fmt"
	"os"

	"github.com/kubernetes-incubator/descheduler/cmd/descheduler/app"
)

func main() {
-	cmd := app.NewDeschedulerCommand()
+	out := os.Stdout
+	cmd := app.NewDeschedulerCommand(out)
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
@@ -17,7 +17,6 @@ limitations under the License.

package node

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -27,6 +26,7 @@ import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
+	"github.com/golang/glog"
)

// ReadyNodes returns ready nodes irrespective of whether they are
@@ -78,7 +78,7 @@ func IsReady(node *v1.Node) bool {
	// - NodeOutOfDisk condition status is ConditionFalse,
	// - NodeNetworkUnavailable condition status is ConditionFalse.
	if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
-		fmt.Printf("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+		glog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
		return false
	} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
		glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
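IsReady walks the node's status conditions and treats anything other than NodeReady=True as not usable, logging the skipped node at verbosity 1; the commented-out branches show that OutOfDisk and NetworkUnavailable checks were considered as well. A stand-alone sketch of that condition walk; isReady is an illustrative name, and the k8s.io/api/core/v1 import path is an assumption (the hunk above uses the older k8s.io/kubernetes/pkg/api/v1 path):

package node

import (
	"github.com/golang/glog"
	v1 "k8s.io/api/core/v1"
)

// isReady reports whether the node's NodeReady condition is True.
func isReady(node *v1.Node) bool {
	for i := range node.Status.Conditions {
		cond := &node.Status.Conditions[i]
		if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
			glog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
			return false
		}
	}
	return true
}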
@@ -26,11 +26,12 @@ import (
	_ "github.com/kubernetes-incubator/descheduler/pkg/api/install"
	"github.com/kubernetes-incubator/descheduler/pkg/api/v1alpha1"
	"github.com/kubernetes-incubator/descheduler/pkg/descheduler/scheme"
+	"github.com/golang/glog"
)

func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
	if policyConfigFile == "" {
-		fmt.Printf("policy config file not specified")
+		glog.V(1).Infof("policy config file not specified")
		return nil, nil
	}
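LoadPolicyConfig treats a missing policy file as a non-error and only mentions it at verbosity 1. glog.V(n) returns a Verbose value that is true only when the process was started with -v=n or higher, so the log line (and its formatting work) can be skipped entirely at lower verbosity. A small sketch of that gating; expensiveDump is a hypothetical helper standing in for any costly argument:

package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
)

// expensiveDump stands in for an argument that is costly to build.
func expensiveDump() string { return fmt.Sprintf("%#v", struct{ N int }{42}) }

func main() {
	flag.Parse()
	defer glog.Flush()

	// Guarding with the Verbose value avoids building the argument
	// unless -v=2 (or higher) was passed.
	if glog.V(2) {
		glog.Infof("detailed state: %s", expensiveDump())
	}

	// The unguarded form logs the same line, but always evaluates its arguments.
	glog.V(1).Infof("policy config file not specified")
}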
@@ -17,7 +17,7 @@ limitations under the License.

package strategies

import (
	"fmt"
+	"github.com/golang/glog"
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
@@ -47,21 +47,19 @@ func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.Descheduler
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool) int {
	podsEvicted := 0
	for _, node := range nodes {
-		fmt.Printf("\nProcessing node: %#v\n", node.Name)
+		glog.V(1).Infof("\nProcessing node: %#v\n", node.Name)
		dpm := ListDuplicatePodsOnANode(client, node)
		for creator, pods := range dpm {
			if len(pods) > 1 {
-				fmt.Printf("%#v\n", creator)
+				glog.V(1).Infof("%#v\n", creator)
				// i = 0 does not evict the first pod
				for i := 1; i < len(pods); i++ {
-					//fmt.Printf("Removing duplicate pod %#v\n", k.Name)
					success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
					if !success {
-						//TODO: change fmt.Printf as glogs.
-						fmt.Printf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
+						glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
					} else {
						podsEvicted++
-						fmt.Printf("Evicted pod: %#v (%#v)\n", pods[i].Name, err)
+						glog.V(1).Infof("Evicted pod: %#v (%#v)\n", pods[i].Name, err)
					}
				}
			}
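deleteDuplicatePods groups a node's pods by their creator (the owning controller) and, whenever a creator owns more than one pod on the same node, evicts every pod after the first, counting successes. A self-contained sketch of that selection logic; evictDuplicates is a hypothetical name, and the evict callback stands in for evictions.EvictPod, which in the real code needs a client, a policy group version, and a dry-run flag:

package main

import "fmt"

// evictDuplicates keeps the first pod per creator and evicts the rest via the
// supplied callback, which reports whether the eviction succeeded.
func evictDuplicates(podsByCreator map[string][]string, evict func(pod string) bool) int {
	evicted := 0
	for creator, pods := range podsByCreator {
		if len(pods) <= 1 {
			continue
		}
		fmt.Printf("creator %q has %d duplicates\n", creator, len(pods)-1)
		// i = 0 keeps the first pod; everything after it is a duplicate.
		for i := 1; i < len(pods); i++ {
			if evict(pods[i]) {
				evicted++
			}
		}
	}
	return evicted
}

func main() {
	dpm := map[string][]string{"ReplicaSet/v1/web": {"web-a", "web-b", "web-c"}}
	n := evictDuplicates(dpm, func(pod string) bool { return true })
	fmt.Println("evicted:", n) // evicted: 2
}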
@@ -17,9 +17,9 @@ limitations under the License.

package strategies

import (
	"fmt"
	"sort"

+	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
	helper "k8s.io/kubernetes/pkg/api/v1/resource"
@@ -61,16 +61,16 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
	lowNodes, targetNodes, _ := classifyNodes(npm, thresholds, targetThresholds)

	if len(lowNodes) == 0 {
-		fmt.Printf("No node is underutilized\n")
+		glog.V(1).Infof("No node is underutilized\n")
		return
	} else if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
-		fmt.Printf("number of nodes underutilized is less than NumberOfNodes\n")
+		glog.V(1).Infof("number of nodes underutilized is less than NumberOfNodes\n")
		return
	} else if len(lowNodes) == len(nodes) {
-		fmt.Printf("all nodes are underutilized\n")
+		glog.V(1).Infof("all nodes are underutilized\n")
		return
	} else if len(targetNodes) == 0 {
-		fmt.Printf("no node is above target utilization\n")
+		glog.V(1).Infof("no node is above target utilization\n")
		return
	}
	evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun)
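LowNodeUtilization only starts evicting once the cluster shape makes it worthwhile: there must be at least one underutilized node (and at least NumberOfNodes of them if that knob is set), not every node may be underutilized, and at least one node must sit above the target thresholds. A compact sketch of those guards; shouldRebalance is a hypothetical name and plain counts replace the NodeUsageMap slices:

package main

import "fmt"

// shouldRebalance mirrors the guard clauses above: it reports whether eviction
// is worthwhile given how many nodes sit under or over the thresholds.
// minUnderutilized corresponds to NumberOfNodes (0 means "no minimum").
func shouldRebalance(total, underutilized, overTarget, minUnderutilized int) (bool, string) {
	switch {
	case underutilized == 0:
		return false, "no node is underutilized"
	case underutilized < minUnderutilized:
		return false, "number of nodes underutilized is less than NumberOfNodes"
	case underutilized == total:
		return false, "all nodes are underutilized"
	case overTarget == 0:
		return false, "no node is above target utilization"
	}
	return true, ""
}

func main() {
	ok, reason := shouldRebalance(5, 2, 1, 0)
	fmt.Println(ok, reason) // true
}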
@@ -78,7 +78,7 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS

func validateThresholds(thresholds api.ResourceThresholds) bool {
	if thresholds == nil {
-		fmt.Printf("no resource threshold is configured\n")
+		glog.V(1).Infof("no resource threshold is configured\n")
		return false
	}
	found := false
@@ -89,7 +89,7 @@ func validateThresholds(thresholds api.ResourceThresholds) bool {
		}
	}
	if !found {
-		fmt.Printf("one of cpu, memory, or pods resource threshold must be configured\n")
+		glog.V(1).Infof("one of cpu, memory, or pods resource threshold must be configured\n")
		return false
	}
	return found
@@ -98,10 +98,10 @@ func validateThresholds(thresholds api.ResourceThresholds) bool {
//This function could be merged into above once we are clear.
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
	if targetThresholds == nil {
-		fmt.Printf("no target resource threshold is configured\n")
+		glog.V(1).Infof("no target resource threshold is configured\n")
		return false
	} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
-		fmt.Printf("no target resource threshold for pods is configured\n")
+		glog.V(1).Infof("no target resource threshold for pods is configured\n")
		return false
	}
	return true
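Both validators only sanity-check the maps read from the policy file: thresholds must name at least one of cpu, memory, or pods, and targetThresholds must additionally include pods. A stand-alone sketch of that check; the function names are hypothetical and a plain map keyed by resource name stands in for api.ResourceThresholds:

package main

import "fmt"

// validThresholds reports whether at least one of cpu, memory, or pods is configured.
func validThresholds(thresholds map[string]float64) bool {
	if thresholds == nil {
		return false
	}
	for _, name := range []string{"cpu", "memory", "pods"} {
		if _, ok := thresholds[name]; ok {
			return true
		}
	}
	return false
}

// validTargetThresholds additionally requires a pods threshold, since the
// eviction loop tracks remaining pod capacity as a percentage of pods.
func validTargetThresholds(targetThresholds map[string]float64) bool {
	if targetThresholds == nil {
		return false
	}
	_, ok := targetThresholds["pods"]
	return ok
}

func main() {
	fmt.Println(validThresholds(map[string]float64{"cpu": 20}))        // true
	fmt.Println(validTargetThresholds(map[string]float64{"cpu": 80}))  // false: pods missing
}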
@@ -112,7 +112,8 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
	for node, pods := range npm {
		usage, nonRemovablePods, bePods, bPods, gPods := NodeUtilization(node, pods)
		nuMap := NodeUsageMap{node, usage, nonRemovablePods, bePods, bPods, gPods}
-		fmt.Printf("Node %#v usage: %#v\n", node.Name, usage)
+		glog.V(1).Infof("Node %#v usage: %#v\n", node.Name, usage)

		if IsNodeWithLowUtilization(usage, thresholds) {
			lowNodes = append(lowNodes, nuMap)
		} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
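classifyNodes computes each node's usage via NodeUtilization and sorts it into one of two buckets: below the low thresholds (a candidate to receive pods) or above the target thresholds (a candidate to evict from); nodes in between are left alone. A simplified sketch with usage collapsed to a single percentage and hypothetical names (nodeUsage, classify) in place of the NodeUsageMap plumbing:

package main

import "fmt"

type nodeUsage struct {
	name  string
	usage float64 // utilization in percent (simplified to one number)
}

// classify splits nodes into underutilized and over-target buckets; nodes
// between the two thresholds fall into neither and are not touched.
func classify(nodes []nodeUsage, low, target float64) (lowNodes, targetNodes []nodeUsage) {
	for _, n := range nodes {
		switch {
		case n.usage < low:
			lowNodes = append(lowNodes, n)
		case n.usage > target:
			targetNodes = append(targetNodes, n)
		}
	}
	return lowNodes, targetNodes
}

func main() {
	nodes := []nodeUsage{{"a", 10}, {"b", 55}, {"c", 90}}
	low, target := classify(nodes, 20, 80)
	fmt.Println(len(low), len(target)) // 1 1
}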
@@ -159,7 +160,7 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
		if len(node.node.Status.Allocatable) > 0 {
			nodeCapacity = node.node.Status.Allocatable
		}
-		fmt.Printf("evicting pods from node %#v with usage: %#v\n", node.node.Name, node.usage)
+		glog.V(1).Infof("evicting pods from node %#v with usage: %#v\n", node.node.Name, node.usage)
		// evict best effort pods
		evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCpu, &totalMem, &podsEvicted, dryRun)
		// evict burstable pods
@@ -170,6 +171,8 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
	return podsEvicted
}

func evictPods(inputPods []*v1.Pod,
	client clientset.Interface,
	evictionPolicyGroupVersion string,
@@ -188,9 +191,9 @@ func evictPods(inputPods []*v1.Pod,
		mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
		success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
		if !success {
-			fmt.Printf("Error when evicting pod: %#v (%#v)\n", pod.Name, err)
+			glog.Infof("Error when evicting pod: %#v (%#v)\n", pod.Name, err)
		} else {
-			fmt.Printf("Evicted pod: %#v (%#v)\n", pod.Name, err)
+			glog.V(1).Infof("Evicted pod: %#v (%#v)\n", pod.Name, err)
			// update remaining pods
			*podsEvicted++
			nodeUsage[v1.ResourcePods] -= onePodPercentage
@@ -204,7 +207,7 @@ func evictPods(inputPods []*v1.Pod,
			*totalMem -= float64(mUsage)
			nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)

-			fmt.Printf("updated node usage: %#v\n", nodeUsage)
+			glog.V(1).Infof("updated node usage: %#v\n", nodeUsage)
			// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
			if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCpu <= 0 && *totalMem <= 0) {
				break
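After each successful eviction, evictPods re-derives the node's usage in place: every resource is tracked as a percentage of node capacity, so an evicted pod subtracts request/capacity*100 from the corresponding entry (and one pod subtracts onePodPercentage from the pods entry), and the loop stops once the node is no longer above the target thresholds or the budget of capacity to move is exhausted. A small sketch of that bookkeeping for a single resource; releaseRequest is a hypothetical name:

package main

import "fmt"

// releaseRequest subtracts an evicted pod's request from a node's usage map,
// where usage is kept in percent of the node's capacity for that resource.
func releaseRequest(usage map[string]float64, resource string, request, capacity float64) {
	usage[resource] -= request / capacity * 100
}

func main() {
	// Node with 4000 millicores of CPU, currently 90% used.
	usage := map[string]float64{"cpu": 90}
	capacityMilliCPU := 4000.0

	// Evicting a pod that requests 400m frees 400/4000*100 = 10 percentage points.
	releaseRequest(usage, "cpu", 400, capacityMilliCPU)
	fmt.Println(usage["cpu"]) // 80

	target := 75.0
	fmt.Println(usage["cpu"] > target) // true: still above target, keep evicting
}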
@@ -237,7 +240,7 @@ func CreateNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap
	for _, node := range nodes {
		pods, err := podutil.ListPodsOnANode(client, node)
		if err != nil {
-			fmt.Printf("node %s will not be processed, error in accessing its pods (%#v)\n", node.Name, err)
+			glog.Infof("node %s will not be processed, error in accessing its pods (%#v)\n", node.Name, err)
		} else {
			npm[node] = pods
		}
@@ -299,7 +302,7 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod) (api.ResourceThresholds, []*

		req, _, err := helper.PodRequestsAndLimits(pod)
		if err != nil {
-			fmt.Printf("Error computing resource usage of pod, ignoring: %#v\n", pod.Name)
+			glog.Infof("Error computing resource usage of pod, ignoring: %#v\n", pod.Name)
			continue
		}
		for name, quantity := range req {
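NodeUtilization sums each pod's resource requests (via helper.PodRequestsAndLimits) and converts the totals into percentages of node capacity; pods whose requests cannot be computed are logged and skipped. A reduced sketch of that aggregation over plain request maps; utilization is a hypothetical name and the QoS bucketing of the real function is omitted:

package main

import "fmt"

// utilization sums per-pod requests and returns usage as percent of capacity
// for each resource. Pods with a nil request map are skipped, mirroring the
// "ignoring" branch above.
func utilization(podRequests []map[string]float64, capacity map[string]float64) map[string]float64 {
	totals := map[string]float64{}
	for _, req := range podRequests {
		if req == nil {
			continue // could not compute this pod's requests
		}
		for name, quantity := range req {
			totals[name] += quantity
		}
	}
	usage := map[string]float64{}
	for name, total := range totals {
		usage[name] = total / capacity[name] * 100
	}
	return usage
}

func main() {
	pods := []map[string]float64{
		{"cpu": 500, "memory": 1024},
		nil, // a pod whose requests could not be computed
		{"cpu": 1500, "memory": 3072},
	}
	capacity := map[string]float64{"cpu": 4000, "memory": 8192}
	fmt.Println(utilization(pods, capacity)) // map[cpu:50 memory:50]
}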