
Switch from glog to klog

Mike Dame
2019-10-28 17:00:12 -04:00
parent dd54f1a656
commit 4b9e732c18
10 changed files with 69 additions and 71 deletions
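
The change is mechanical in every file below: the github.com/golang/glog import is dropped, k8s.io/klog is added, and each call site keeps its method name and verbosity level; only the package name changes. A minimal standalone sketch of the same pattern outside this repository (the explicit InitFlags/Parse wiring and the nodeName variable are illustrative assumptions, not taken from this commit):

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog's -v, -logtostderr, and related flags are registered via
	// InitFlags; passing nil registers them on flag.CommandLine.
	klog.InitFlags(nil)
	flag.Parse()

	nodeName := "node-1"
	// Call sites keep the glog surface: V(level).Infof, Errorf, Warningf, Fatalf.
	klog.V(1).Infof("Processing node: %#v", nodeName)
	klog.Flush()
}

Because the two APIs match, the diff below is almost entirely a one-for-one substitution of the package name.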

View File

@@ -24,11 +24,11 @@ import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
- "github.com/golang/glog"
"github.com/spf13/cobra"
aflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
+ "k8s.io/klog"
)
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
@@ -43,7 +43,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
defer logs.FlushLogs()
err := Run(s)
if err != nil {
- glog.Errorf("%v", err)
+ klog.Errorf("%v", err)
}
},
}
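
The entrypoint keeps its existing k8s.io/component-base/logs setup (logs.FlushLogs is still deferred). If klog's own flags such as -v need to be exposed on the cobra command line, one common pattern is to bridge klog's Go flag set into the command's pflag set. The sketch below is an assumption for illustration only; this commit does not add such a helper, and the descheduler may wire verbosity differently:

package main

import (
	goflag "flag"

	"github.com/spf13/cobra"
	"k8s.io/klog"
)

// addKlogFlags is a hypothetical helper: it registers klog's flags on a
// standard Go flag set and merges them into the command's pflag set so
// that -v and friends are accepted on the command line.
func addKlogFlags(cmd *cobra.Command) {
	fs := goflag.NewFlagSet("klog", goflag.ExitOnError)
	klog.InitFlags(fs)
	cmd.Flags().AddGoFlagSet(fs)
}

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			klog.V(1).Info("command ran")
		},
	}
	addKlogFlags(cmd)
	if err := cmd.Execute(); err != nil {
		klog.Errorf("%v", err)
	}
	klog.Flush()
}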

View File

@@ -19,7 +19,7 @@ package descheduler
import (
"fmt"
- "github.com/golang/glog"
+ "k8s.io/klog"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
@@ -56,7 +56,7 @@ func Run(rs *options.DeschedulerServer) error {
}
if len(nodes) <= 1 {
- glog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
+ klog.V(1).Infof("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return nil
}

View File

@@ -19,7 +19,6 @@ package node
import (
"time"
- "github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -27,6 +26,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
+ "k8s.io/klog"
"sigs.k8s.io/descheduler/pkg/utils"
)
@@ -48,7 +48,7 @@ func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-c
}
if len(nodes) == 0 {
- glog.V(2).Infof("node lister returned empty list, now fetch directly")
+ klog.V(2).Infof("node lister returned empty list, now fetch directly")
nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
if err != nil {
@@ -99,19 +99,19 @@ func IsReady(node *v1.Node) bool {
// - NodeOutOfDisk condition status is ConditionFalse,
// - NodeNetworkUnavailable condition status is ConditionFalse.
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
- glog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+ klog.V(1).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
} /*else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
- glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+ klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
- glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+ klog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false
}*/
}
// Ignore nodes that are marked unschedulable
/*if node.Spec.Unschedulable {
- glog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
+ klog.V(4).Infof("Ignoring node %v since it is unschedulable", node.Name)
return false
}*/
return true
@@ -135,7 +135,7 @@ func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
}
if ok {
if !IsNodeUschedulable(node) {
- glog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
+ klog.V(2).Infof("Pod %v can possibly be scheduled on %v", pod.Name, node.Name)
return true
}
return false
@@ -150,15 +150,15 @@ func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
ok, err := utils.PodMatchNodeSelector(pod, node)
if err != nil {
- glog.Error(err)
+ klog.Error(err)
return false
}
if !ok {
- glog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
+ klog.V(1).Infof("Pod %v does not fit on node %v", pod.Name, node.Name)
return false
}
- glog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
+ klog.V(3).Infof("Pod %v fits on node %v", pod.Name, node.Name)
return true
}
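
Most of the converted call sites in this file gate their output on a verbosity level, and klog keeps glog's semantics: a V(n) call only emits when the configured -v value is n or higher. A small self-contained illustration; the flag-set name, the chosen level, and the messages are arbitrary, not code from the repository:

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	fs := flag.NewFlagSet("verbosity-demo", flag.ExitOnError)
	klog.InitFlags(fs)
	fs.Set("v", "2")              // same effect as passing -v=2
	fs.Set("logtostderr", "true") // log to stderr instead of files

	klog.V(1).Info("printed: level 1 <= 2")
	klog.V(2).Info("printed: level 2 <= 2")
	klog.V(3).Info("suppressed: level 3 > 2")
	klog.Flush()
}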

View File

@@ -21,8 +21,8 @@ import (
"io/ioutil"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/klog"
- "github.com/golang/glog"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
@@ -30,7 +30,7 @@ import (
func LoadPolicyConfig(policyConfigFile string) (*api.DeschedulerPolicy, error) {
if policyConfigFile == "" {
- glog.V(1).Infof("policy config file not specified")
+ klog.V(1).Infof("policy config file not specified")
return nil, nil
}

View File

@@ -19,10 +19,9 @@ package strategies
import (
"strings"
- "github.com/golang/glog"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
+ "k8s.io/klog"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
@@ -47,11 +46,11 @@ func RemoveDuplicatePods(ds *options.DeschedulerServer, strategy api.Descheduler
func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodepodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
- glog.V(1).Infof("Processing node: %#v", node.Name)
+ klog.V(1).Infof("Processing node: %#v", node.Name)
dpm := ListDuplicatePodsOnANode(client, node, evictLocalStoragePods)
for creator, pods := range dpm {
if len(pods) > 1 {
- glog.V(1).Infof("%#v", creator)
+ klog.V(1).Infof("%#v", creator)
// i = 0 does not evict the first pod
for i := 1; i < len(pods); i++ {
if maxPodsToEvict > 0 && nodepodCount[node]+1 > maxPodsToEvict {
@@ -59,10 +58,10 @@ func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string,
}
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
- glog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
+ klog.Infof("Error when evicting pod: %#v (%#v)", pods[i].Name, err)
} else {
nodepodCount[node]++
- glog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
+ klog.V(1).Infof("Evicted pod: %#v (%#v)", pods[i].Name, err)
}
}
}

View File

@@ -19,10 +19,10 @@ package strategies
import (
"sort"
- "github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
+ "k8s.io/klog"
helper "k8s.io/kubernetes/pkg/api/v1/resource"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
@@ -63,42 +63,42 @@ func LowNodeUtilization(ds *options.DeschedulerServer, strategy api.DeschedulerS
npm := createNodePodsMap(ds.Client, nodes)
lowNodes, targetNodes := classifyNodes(npm, thresholds, targetThresholds, ds.EvictLocalStoragePods)
- glog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
+ klog.V(1).Infof("Criteria for a node under utilization: CPU: %v, Mem: %v, Pods: %v",
thresholds[v1.ResourceCPU], thresholds[v1.ResourceMemory], thresholds[v1.ResourcePods])
if len(lowNodes) == 0 {
- glog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
+ klog.V(1).Infof("No node is underutilized, nothing to do here, you might tune your thresholds further")
return
}
- glog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
+ klog.V(1).Infof("Total number of underutilized nodes: %v", len(lowNodes))
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
- glog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
+ klog.V(1).Infof("number of nodes underutilized (%v) is less than NumberOfNodes (%v), nothing to do here", len(lowNodes), strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}
if len(lowNodes) == len(nodes) {
- glog.V(1).Infof("all nodes are underutilized, nothing to do here")
+ klog.V(1).Infof("all nodes are underutilized, nothing to do here")
return
}
if len(targetNodes) == 0 {
- glog.V(1).Infof("all nodes are under target utilization, nothing to do here")
+ klog.V(1).Infof("all nodes are under target utilization, nothing to do here")
return
}
- glog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
+ klog.V(1).Infof("Criteria for a node above target utilization: CPU: %v, Mem: %v, Pods: %v",
targetThresholds[v1.ResourceCPU], targetThresholds[v1.ResourceMemory], targetThresholds[v1.ResourcePods])
- glog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
+ klog.V(1).Infof("Total number of nodes above target utilization: %v", len(targetNodes))
totalPodsEvicted := evictPodsFromTargetNodes(ds.Client, evictionPolicyGroupVersion, targetNodes, lowNodes, targetThresholds, ds.DryRun, ds.MaxNoOfPodsToEvictPerNode, nodepodCount)
- glog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
+ klog.V(1).Infof("Total number of pods evicted: %v", totalPodsEvicted)
}
func validateThresholds(thresholds api.ResourceThresholds) bool {
if thresholds == nil || len(thresholds) == 0 {
- glog.V(1).Infof("no resource threshold is configured")
+ klog.V(1).Infof("no resource threshold is configured")
return false
}
for name := range thresholds {
@@ -110,7 +110,7 @@ func validateThresholds(thresholds api.ResourceThresholds) bool {
case v1.ResourcePods:
continue
default:
- glog.Errorf("only cpu, memory, or pods thresholds can be specified")
+ klog.Errorf("only cpu, memory, or pods thresholds can be specified")
return false
}
}
@@ -120,10 +120,10 @@ func validateThresholds(thresholds api.ResourceThresholds) bool {
//This function could be merged into above once we are clear.
func validateTargetThresholds(targetThresholds api.ResourceThresholds) bool {
if targetThresholds == nil {
- glog.V(1).Infof("no target resource threshold is configured")
+ klog.V(1).Infof("no target resource threshold is configured")
return false
} else if _, ok := targetThresholds[v1.ResourcePods]; !ok {
- glog.V(1).Infof("no target resource threshold for pods is configured")
+ klog.V(1).Infof("no target resource threshold for pods is configured")
return false
}
return true
@@ -139,15 +139,15 @@ func classifyNodes(npm NodePodsMap, thresholds api.ResourceThresholds, targetThr
// Check if node is underutilized and if we can schedule pods on it.
if !nodeutil.IsNodeUschedulable(node) && IsNodeWithLowUtilization(usage, thresholds) {
- glog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
+ klog.V(2).Infof("Node %#v is under utilized with usage: %#v", node.Name, usage)
lowNodes = append(lowNodes, nuMap)
} else if IsNodeAboveTargetUtilization(usage, targetThresholds) {
- glog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
+ klog.V(2).Infof("Node %#v is over utilized with usage: %#v", node.Name, usage)
targetNodes = append(targetNodes, nuMap)
} else {
- glog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
+ klog.V(2).Infof("Node %#v is appropriately utilized with usage: %#v", node.Name, usage)
}
- glog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
+ klog.V(2).Infof("allPods:%v, nonRemovablePods:%v, bePods:%v, bPods:%v, gPods:%v", len(allPods), len(nonRemovablePods), len(bePods), len(bPods), len(gPods))
}
return lowNodes, targetNodes
}
@@ -184,20 +184,20 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
}
}
- glog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
- glog.V(1).Infof("********Number of pods evicted from each node:***********")
+ klog.V(1).Infof("Total capacity to be moved: CPU:%v, Mem:%v, Pods:%v", totalCPU, totalMem, totalPods)
+ klog.V(1).Infof("********Number of pods evicted from each node:***********")
for _, node := range targetNodes {
nodeCapacity := node.node.Status.Capacity
if len(node.node.Status.Allocatable) > 0 {
nodeCapacity = node.node.Status.Allocatable
}
- glog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
+ klog.V(3).Infof("evicting pods from node %#v with usage: %#v", node.node.Name, node.usage)
currentPodsEvicted := nodepodCount[node.node]
// Check if one pod has priority, if yes, assume that all pods have priority and evict pods based on priority.
if node.allPods[0].Spec.Priority != nil {
- glog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
+ klog.V(1).Infof("All pods have priority associated with them. Evicting pods based on priority")
evictablePods := make([]*v1.Pod, 0)
evictablePods = append(append(node.bPods, node.bePods...), node.gPods...)
@@ -207,8 +207,8 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
} else {
// TODO: Remove this when we support only priority.
// Falling back to evicting pods based on priority.
- glog.V(1).Infof("Evicting pods based on QoS")
- glog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
+ klog.V(1).Infof("Evicting pods based on QoS")
+ klog.V(1).Infof("There are %v non-evictable pods on the node", len(node.nonRemovablePods))
// evict best effort pods
evictPods(node.bePods, client, evictionPolicyGroupVersion, targetThresholds, nodeCapacity, node.usage, &totalPods, &totalCPU, &totalMem, &currentPodsEvicted, dryRun, maxPodsToEvict)
// evict burstable pods
@@ -218,7 +218,7 @@ func evictPodsFromTargetNodes(client clientset.Interface, evictionPolicyGroupVer
}
nodepodCount[node.node] = currentPodsEvicted
podsEvicted = podsEvicted + nodepodCount[node.node]
- glog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
+ klog.V(1).Infof("%v pods evicted from node %#v with usage %v", nodepodCount[node.node], node.node.Name, node.usage)
}
return podsEvicted
}
@@ -244,9 +244,9 @@ func evictPods(inputPods []*v1.Pod,
mUsage := helper.GetResourceRequest(pod, v1.ResourceMemory)
success, err := evictions.EvictPod(client, pod, evictionPolicyGroupVersion, dryRun)
if !success {
- glog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
+ klog.Warningf("Error when evicting pod: %#v (%#v)", pod.Name, err)
} else {
- glog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
+ klog.V(3).Infof("Evicted pod: %#v (%#v)", pod.Name, err)
// update remaining pods
*podsEvicted++
nodeUsage[v1.ResourcePods] -= onePodPercentage
@@ -260,7 +260,7 @@ func evictPods(inputPods []*v1.Pod,
*totalMem -= float64(mUsage)
nodeUsage[v1.ResourceMemory] -= api.Percentage(float64(mUsage) / float64(nodeCapacity.Memory().Value()) * 100)
- glog.V(3).Infof("updated node usage: %#v", nodeUsage)
+ klog.V(3).Infof("updated node usage: %#v", nodeUsage)
// check if node utilization drops below target threshold or required capacity (cpu, memory, pods) is moved
if !IsNodeAboveTargetUtilization(nodeUsage, targetThresholds) || (*totalPods <= 0 && *totalCPU <= 0 && *totalMem <= 0) {
break
@@ -316,7 +316,7 @@ func createNodePodsMap(client clientset.Interface, nodes []*v1.Node) NodePodsMap
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(client, node)
if err != nil {
- glog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
+ klog.Warningf("node %s will not be processed, error in accessing its pods (%#v)", node.Name, err)
} else {
npm[node] = pods
}

View File

@@ -17,8 +17,9 @@ limitations under the License.
package strategies
import (
- "github.com/golang/glog"
"k8s.io/api/core/v1"
+ "k8s.io/klog"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -37,16 +38,16 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
}
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
- glog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
+ klog.V(2).Infof("Executing for nodeAffinityType: %v", nodeAffinity)
switch nodeAffinity {
case "requiredDuringSchedulingIgnoredDuringExecution":
for _, node := range nodes {
- glog.V(1).Infof("Processing node: %#v\n", node.Name)
+ klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(ds.Client, node, evictLocalStoragePods)
if err != nil {
- glog.Errorf("failed to get pods from %v: %v", node.Name, err)
+ klog.Errorf("failed to get pods from %v: %v", node.Name, err)
}
for _, pod := range pods {
@@ -56,7 +57,7 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
if pod.Spec.Affinity != nil && pod.Spec.Affinity.NodeAffinity != nil && pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if !nodeutil.PodFitsCurrentNode(pod, node) && nodeutil.PodFitsAnyNode(pod, nodes) {
- glog.V(1).Infof("Evicting pod: %v", pod.Name)
+ klog.V(1).Infof("Evicting pod: %v", pod.Name)
evictions.EvictPod(ds.Client, pod, evictionPolicyGroupVersion, ds.DryRun)
nodepodCount[node]++
}
@@ -65,10 +66,10 @@ func removePodsViolatingNodeAffinityCount(ds *options.DeschedulerServer, strateg
evictedPodCount += nodepodCount[node]
}
default:
- glog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
+ klog.Errorf("invalid nodeAffinityType: %v", nodeAffinity)
return evictedPodCount
}
}
- glog.V(1).Infof("Evicted %v pods", evictedPodCount)
+ klog.V(1).Infof("Evicted %v pods", evictedPodCount)
return evictedPodCount
}

View File

@@ -22,11 +22,10 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
- "github.com/golang/glog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
+ "k8s.io/klog"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
)
@@ -42,7 +41,7 @@ func RemovePodsViolatingInterPodAntiAffinity(ds *options.DeschedulerServer, stra
func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node, dryRun bool, nodePodCount nodePodEvictedCount, maxPodsToEvict int, evictLocalStoragePods bool) int {
podsEvicted := 0
for _, node := range nodes {
- glog.V(1).Infof("Processing node: %#v\n", node.Name)
+ klog.V(1).Infof("Processing node: %#v\n", node.Name)
pods, err := podutil.ListEvictablePodsOnNode(client, node, evictLocalStoragePods)
if err != nil {
return 0
@@ -55,10 +54,10 @@ func removePodsWithAffinityRules(client clientset.Interface, policyGroupVersion
if checkPodsWithAntiAffinityExist(pods[i], pods) {
success, err := evictions.EvictPod(client, pods[i], policyGroupVersion, dryRun)
if !success {
- glog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
+ klog.Infof("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
} else {
nodePodCount[node]++
- glog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
+ klog.V(1).Infof("Evicted pod: %#v (%#v)\n because of existing anti-affinity", pods[i].Name, err)
// Since the current pod is evicted all other pods which have anti-affinity with this
// pod need not be evicted.
// Update pods.
@@ -81,7 +80,7 @@ func checkPodsWithAntiAffinityExist(pod *v1.Pod, pods []*v1.Pod) bool {
namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
if err != nil {
- glog.Infof("%v", err)
+ klog.Infof("%v", err)
return false
}
for _, existingPod := range pods {

View File

@@ -19,9 +19,9 @@ package utils
import (
"fmt"
- "github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
+ "k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)
@@ -70,7 +70,7 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
- glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
+ klog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
return nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
}
}
@@ -83,7 +83,7 @@ func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSele
for _, req := range nodeSelectorTerms {
nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
if err != nil {
- glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
+ klog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
return false
}
if nodeSelector.Matches(labels.Set(node.Labels)) {

View File

@@ -21,12 +21,11 @@ import (
"testing"
"time"
- "github.com/golang/glog"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
+ "k8s.io/klog"
"k8s.io/kubernetes/pkg/api/testapi"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
@@ -104,12 +103,12 @@ func startEndToEndForLowNodeUtilization(clientset clientset.Interface) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
- glog.Fatalf("%v", err)
+ klog.Fatalf("%v", err)
}
stopChannel := make(chan struct{})
nodes, err := nodeutil.ReadyNodes(clientset, "", stopChannel)
if err != nil {
- glog.Fatalf("%v", err)
+ klog.Fatalf("%v", err)
}
nodeUtilizationThresholds := deschedulerapi.NodeResourceUtilizationThresholds{Thresholds: thresholds, TargetThresholds: targetThresholds}
nodeUtilizationStrategyParams := deschedulerapi.StrategyParameters{NodeResourceUtilizationThresholds: nodeUtilizationThresholds}