
fix compile errors

Mike Dame
2019-10-11 14:24:08 -04:00
parent 1652ba7976
commit 0af97c1b5e
5 changed files with 4 additions and 33 deletions


@@ -45,7 +45,7 @@ func EvictPod(client clientset.Interface, pod *v1.Pod, policyGroupVersion string
 		},
 		DeleteOptions: deleteOptions,
 	}
-	err := client.Policy().Evictions(eviction.Namespace).Evict(eviction)
+	err := client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
 	if err == nil {
 		return true, nil

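The first error comes from client-go dropping its ungrouped accessors: clientset.Interface no longer has a Policy() method, and each API group is addressed through an explicit group-version method such as PolicyV1beta1(). A minimal sketch of the resulting call pattern, assuming client-go of the same era (roughly 1.16, before Evict took a context.Context); evictOnce and its parameters are illustrative, not names from the repo:

package main

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictOnce is a hypothetical helper mirroring the call fixed above.
func evictOnce(client kubernetes.Interface, name, namespace string) error {
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	// The group and version are now spelled out on the clientset;
	// client.Policy() no longer compiles.
	return client.PolicyV1beta1().Evictions(namespace).Evict(eviction)
}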

@@ -50,7 +50,7 @@ func ReadyNodes(client clientset.Interface, nodeSelector string, stopChannel <-c
 	if len(nodes) == 0 {
 		glog.V(2).Infof("node lister returned empty list, now fetch directly")
-		nItems, err := client.Core().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
+		nItems, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: nodeSelector})
 		if err != nil {
 			return []*v1.Node{}, err
 		}
@@ -78,7 +78,7 @@ func GetNodeLister(client clientset.Interface, stopChannel <-chan struct{}) core
 	if stopChannel == nil {
 		return nil
 	}
-	listWatcher := cache.NewListWatchFromClient(client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
+	listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
 	store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 	nodeLister := corelisters.NewNodeLister(store)
 	reflector := cache.NewReflector(listWatcher, &v1.Node{}, store, time.Hour)

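The same migration applies to the core group: Core() is now CoreV1(), both for typed calls like Nodes().List and for the RESTClient() handed to the list-watch. A short sketch under the same pre-context client-go assumption; listNodes and the selector value are made up for illustration:

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listNodes is a hypothetical helper using the renamed CoreV1 accessor.
func listNodes(client kubernetes.Interface, selector string) ([]v1.Node, error) {
	nodeList, err := client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return nil, err
	}
	return nodeList.Items, nil
}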

@@ -30,31 +30,6 @@ const (
 	evictPodAnnotationKey = "descheduler.alpha.kubernetes.io/evict"
 )
 
-// checkLatencySensitiveResourcesForAContainer checks if there are any latency sensitive resources like GPUs.
-func checkLatencySensitiveResourcesForAContainer(rl v1.ResourceList) bool {
-	if rl == nil {
-		return false
-	}
-	for rName := range rl {
-		if rName == v1.ResourceNvidiaGPU {
-			return true
-		}
-		// TODO: Add support for other high value resources like hugepages etc. once kube is rebased to 1.8.
-	}
-	return false
-}
-
-// IsLatencySensitivePod checks if a pod consumes high value devices like GPUs, hugepages or when cpu pinning enabled.
-func IsLatencySensitivePod(pod *v1.Pod) bool {
-	for _, container := range pod.Spec.Containers {
-		resourceList := container.Resources.Requests
-		if checkLatencySensitiveResourcesForAContainer(resourceList) {
-			return true
-		}
-	}
-	return false
-}
-
 // IsEvictable checks if a pod is evictable or not.
 func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool {
 	ownerRefList := OwnerRef(pod)

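This hunk deletes code instead of renaming a call: v1.ResourceNvidiaGPU disappeared from the core API when the built-in alpha GPU resource was superseded by device plugins, so checkLatencySensitiveResourcesForAContainer no longer compiles and is removed along with its only caller, IsLatencySensitivePod. For comparison, a hedged sketch (not part of this commit) of how a similar check could target GPUs exposed as extended resources; hasGPURequest is hypothetical, and nvidia.com/gpu is the conventional device-plugin resource name:

import v1 "k8s.io/api/core/v1"

// hasGPURequest is a hypothetical stand-in for the deleted check,
// matching GPUs requested as an extended resource.
func hasGPURequest(pod *v1.Pod) bool {
	const nvidiaGPU v1.ResourceName = "nvidia.com/gpu"
	for _, container := range pod.Spec.Containers {
		if q, ok := container.Resources.Requests[nvidiaGPU]; ok && !q.IsZero() {
			return true
		}
	}
	return false
}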

@@ -183,7 +183,6 @@ func TestPodTypes(t *testing.T) {
 	p4 := test.BuildTestPod("p4", 400, 0, n1.Name)
 	p5 := test.BuildTestPod("p5", 400, 0, n1.Name)
 	p6 := test.BuildTestPod("p6", 400, 0, n1.Name)
-	p6.Spec.Containers[0].Resources.Requests[v1.ResourceNvidiaGPU] = *resource.NewMilliQuantity(3, resource.DecimalSI)
 	p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -226,8 +225,5 @@ func TestPodTypes(t *testing.T) {
 	if IsDaemonsetPod(ownerRefList) || IsPodWithLocalStorage(p1) || IsCriticalPod(p1) || IsMirrorPod(p1) {
 		t.Errorf("Expected p1 to be a normal pod.")
 	}
-	if !IsLatencySensitivePod(p6) {
-		t.Errorf("Expected p6 to be latency sensitive pod")
-	}
 }


@@ -377,7 +377,7 @@ func NodeUtilization(node *v1.Node, pods []*v1.Pod, evictLocalStoragePods bool)
 	for name, quantity := range req {
 		if name == v1.ResourceCPU || name == v1.ResourceMemory {
 			if value, ok := totalReqs[name]; !ok {
-				totalReqs[name] = *quantity.Copy()
+				totalReqs[name] = quantity.DeepCopy()
 			} else {
 				value.Add(quantity)
 				totalReqs[name] = value
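
The last fix is in apimachinery rather than client-go: resource.Quantity lost its pointer-returning Copy method (hence the old *quantity.Copy() dereference), and the generated value-returning DeepCopy takes its place. A minimal standalone sketch of the semantics; the quantities are arbitrary:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	total := resource.MustParse("500m")
	snapshot := total.DeepCopy() // independent value, not a shared *Quantity
	total.Add(resource.MustParse("250m"))
	fmt.Println(total.String(), snapshot.String()) // prints "750m 500m"
}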