Mirror of https://github.com/kubernetes-sigs/descheduler.git, synced 2026-01-26 05:14:13 +01:00
Merge pull request #156 from tammert/identical-deployments-fix
Added ownerRef.UID for evaluating duplicate Pods
@@ -84,13 +84,12 @@ func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node, evictLo
// FindDuplicatePods takes a list of pods and returns a duplicatePodsMap.
func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
    dpm := DuplicatePodsMap{}
    // Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode which checks for error.
    for _, pod := range pods {
        // Ignoring the error here as in the ListDuplicatePodsOnNode function we call ListEvictablePodsOnNode
        // which checks for error.
        ownerRefList := podutil.OwnerRef(pod)
        for _, ownerRef := range ownerRefList {
            // ownerRef doesn't need namespace since the assumption is owner needs to be in the same namespace.
            s := strings.Join([]string{ownerRef.Kind, ownerRef.Name}, "/")
            // Namespace/Kind/Name should be unique for the cluster.
            s := strings.Join([]string{pod.ObjectMeta.Namespace, ownerRef.Kind, ownerRef.Name}, "/")
            dpm[s] = append(dpm[s], pod)
        }
    }
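Editor's note: the effect of keying duplicates on Namespace/Kind/Name instead of Kind/Name can be seen in a small standalone Go sketch. The podInfo struct and the sample values below are illustrative only and are not part of this commit: two identically named ReplicaSets in different namespaces collide under the old key but stay separate under the new one.

package main

import (
    "fmt"
    "strings"
)

// podInfo is an illustrative stand-in for the fields FindDuplicatePods reads
// from a pod and its ownerRef; it is not a type from the descheduler code.
type podInfo struct {
    namespace, ownerKind, ownerName string
}

func main() {
    pods := []podInfo{
        {"dev", "ReplicaSet", "replicaset-1"},  // p1-style pod
        {"dev", "ReplicaSet", "replicaset-1"},  // p2-style pod
        {"test", "ReplicaSet", "replicaset-1"}, // pod owned by a same-named ReplicaSet in another namespace
    }

    oldKeys := map[string]int{}
    newKeys := map[string]int{}
    for _, p := range pods {
        // Old key: owner Kind/Name only, so namespaces collapse together.
        oldKeys[strings.Join([]string{p.ownerKind, p.ownerName}, "/")]++
        // New key: Namespace/Kind/Name, unique for the cluster.
        newKeys[strings.Join([]string{p.namespace, p.ownerKind, p.ownerName}, "/")]++
    }

    fmt.Println(oldKeys) // map[ReplicaSet/replicaset-1:3] -> cross-namespace pods look like duplicates
    fmt.Println(newKeys) // map[dev/ReplicaSet/replicaset-1:2 test/ReplicaSet/replicaset-1:1]
}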
@@ -31,26 +31,43 @@ import (
func TestFindDuplicatePods(t *testing.T) {
    node := test.BuildTestNode("n1", 2000, 3000, 10)
    p1 := test.BuildTestPod("p1", 100, 0, node.Name)
    p1.Namespace = "dev"
    p2 := test.BuildTestPod("p2", 100, 0, node.Name)
    p2.Namespace = "dev"
    p3 := test.BuildTestPod("p3", 100, 0, node.Name)
    p3.Namespace = "dev"
    p4 := test.BuildTestPod("p4", 100, 0, node.Name)
    p5 := test.BuildTestPod("p5", 100, 0, node.Name)
    p6 := test.BuildTestPod("p6", 100, 0, node.Name)
    p7 := test.BuildTestPod("p7", 100, 0, node.Name)
    p7.Namespace = "kube-system"
    p8 := test.BuildTestPod("p8", 100, 0, node.Name)
    p8.Namespace = "test"
    p9 := test.BuildTestPod("p9", 100, 0, node.Name)
    p9.Namespace = "test"
    p10 := test.BuildTestPod("p10", 100, 0, node.Name)
    p10.Namespace = "test"

    // All the following pods except for one will be evicted.
    p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
    p2.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
    p3.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
    p8.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
    p9.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
    // ### Evictable Pods ###

    // The following 4 pods won't get evicted.
    // A daemonset.
    // Three Pods in the "dev" Namespace, bound to same ReplicaSet. 2 should be evicted.
    ownerRef1 := test.GetReplicaSetOwnerRefList()
    p1.ObjectMeta.OwnerReferences = ownerRef1
    p2.ObjectMeta.OwnerReferences = ownerRef1
    p3.ObjectMeta.OwnerReferences = ownerRef1

    // Three Pods in the "test" Namespace, bound to same ReplicaSet. 2 should be evicted.
    ownerRef2 := test.GetReplicaSetOwnerRefList()
    p8.ObjectMeta.OwnerReferences = ownerRef2
    p9.ObjectMeta.OwnerReferences = ownerRef2
    p10.ObjectMeta.OwnerReferences = ownerRef2

    // ### Non-evictable Pods ###

    // A DaemonSet.
    p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
    // A pod with local storage.

    // A Pod with local storage.
    p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
    p5.Spec.Volumes = []v1.Volume{
        {
@@ -62,24 +79,30 @@ func TestFindDuplicatePods(t *testing.T) {
            },
        },
    }

    // A Mirror Pod.
    p6.Annotations = test.GetMirrorPodAnnotation()

    // A Critical Pod.
    p7.Namespace = "kube-system"
    p7.Annotations = test.GetCriticalPodAnnotation()
    expectedEvictedPodCount := 2

    // Setup the fake client.
    fakeClient := &fake.Clientset{}
    fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
        return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9}}, nil
        return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10}}, nil
    })
    fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
        return true, node, nil
    })

    expectedEvictedPodCount := 4
    npe := nodePodEvictedCount{}
    npe[node] = 0
    podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 2, false)

    // Start evictions.
    podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}, false, npe, 10, false)
    if podsEvicted != expectedEvictedPodCount {
        t.Errorf("Unexpected no of pods evicted")
        t.Error("Unexpected number of pods evicted.\nExpected:\t", expectedEvictedPodCount, "\nActual:\t\t", podsEvicted)
    }

}
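Editor's note: a rough sketch of the arithmetic behind the updated assertion (the group keys below are assumed for illustration, not taken from the test output). The strategy keeps one pod per duplicate Namespace/Kind/Name key and treats the rest as eviction candidates, so two ReplicaSet groups of three pods each yield four evictions, comfortably under the raised maxPodsToEvict of 10.

package main

import "fmt"

func main() {
    // Pods per Namespace/Kind/Name group in the test fixture above.
    duplicateGroups := map[string]int{
        "dev/ReplicaSet/replicaset-1":  3, // p1, p2, p3
        "test/ReplicaSet/replicaset-1": 3, // p8, p9, p10
    }

    evicted := 0
    for _, podsInGroup := range duplicateGroups {
        evicted += podsInGroup - 1 // one pod per owner key is kept
    }
    fmt.Println(evicted) // 4 == expectedEvictedPodCount
}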
@@ -73,7 +73,7 @@ func GetNormalPodOwnerRefList() []metav1.OwnerReference {
// GetReplicaSetOwnerRefList returns the ownerRef needed for replicaset pod.
func GetReplicaSetOwnerRefList() []metav1.OwnerReference {
    ownerRefList := make([]metav1.OwnerReference, 0)
    ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1"})
    ownerRefList = append(ownerRefList, metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"})
    return ownerRefList
}
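Editor's note: a quick standalone check (assumed Go module setup with the apimachinery dependency, not part of the repo's tests) of what the helper change does to the key used above. The old fixture left the owner's Name empty, so its key ended in a trailing slash; the new fixture names the ReplicaSet.

package main

import (
    "fmt"
    "strings"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    oldRef := metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1"}
    newRef := metav1.OwnerReference{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-1"}

    // Key shape used by FindDuplicatePods after this change: Namespace/Kind/Name.
    fmt.Println(strings.Join([]string{"dev", oldRef.Kind, oldRef.Name}, "/")) // dev/ReplicaSet/
    fmt.Println(strings.Join([]string{"dev", newRef.Kind, newRef.Name}, "/")) // dev/ReplicaSet/replicaset-1
}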