From e2b60d5f927d2d7b04f03a9844b6697b80870b52 Mon Sep 17 00:00:00 2001
From: ravisantoshgudimetla
Date: Mon, 28 Aug 2017 13:43:03 -0400
Subject: [PATCH] Unit tests for duplicatePod deletion strategy.

---
 pkg/rescheduler/strategies/duplicates.go      |  19 +-
 pkg/rescheduler/strategies/duplicates_test.go | 177 ++++++++++++++++++
 2 files changed, 193 insertions(+), 3 deletions(-)
 create mode 100644 pkg/rescheduler/strategies/duplicates_test.go

diff --git a/pkg/rescheduler/strategies/duplicates.go b/pkg/rescheduler/strategies/duplicates.go
index d47ac50c7..d1705ca9c 100644
--- a/pkg/rescheduler/strategies/duplicates.go
+++ b/pkg/rescheduler/strategies/duplicates.go
@@ -21,6 +21,7 @@ import (
 	"strings"
 
 	"k8s.io/kubernetes/pkg/api/v1"
+	//TODO: Change to client-go instead of the generated clientset.
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 
 	"github.com/aveshagarwal/rescheduler/pkg/api"
@@ -31,14 +32,22 @@ import (
 //type creator string
 type DuplicatePodsMap map[string][]*v1.Pod
 
+// RemoveDuplicatePods removes duplicate pods on a node. This strategy evicts all but one pod in each set of duplicates on a node.
+// A pod is said to be a duplicate of another if both are from the same creator and kind and are within the same
+// namespace. As of now, this strategy won't evict daemonsets, mirror pods, critical pods, or pods with local storage.
 func RemoveDuplicatePods(client clientset.Interface, strategy api.ReschedulerStrategy, policyGroupVersion string, nodes []*v1.Node) {
 	if !strategy.Enabled {
 		return
 	}
+	deleteDuplicatePods(client, policyGroupVersion, nodes)
+}
 
+// deleteDuplicatePods evicts duplicate pods from the given nodes and returns the count of evicted pods.
+func deleteDuplicatePods(client clientset.Interface, policyGroupVersion string, nodes []*v1.Node) int {
+	podsEvicted := 0
 	for _, node := range nodes {
 		fmt.Printf("\nProcessing node: %#v\n", node.Name)
-		dpm := RemoveDuplicatePodsOnANode(client, node)
+		dpm := ListDuplicatePodsOnANode(client, node)
 		for creator, pods := range dpm {
 			if len(pods) > 1 {
 				fmt.Printf("%#v\n", creator)
@@ -47,17 +56,21 @@ func RemoveDuplicatePods(client clientset.Interface, strategy api.ReschedulerStr
 					//fmt.Printf("Removing duplicate pod %#v\n", k.Name)
 					success, err := evictions.EvictPod(client, pods[i], policyGroupVersion)
 					if !success {
+						//TODO: change fmt.Printf to glog.
 						fmt.Printf("Error when evicting pod: %#v (%#v)\n", pods[i].Name, err)
 					} else {
+						podsEvicted++
 						fmt.Printf("Evicted pod: %#v (%#v)\n", pods[i].Name, err)
 					}
 				}
 			}
 		}
 	}
+	return podsEvicted
 }
 
-func RemoveDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
+// ListDuplicatePodsOnANode lists duplicate pods on a given node.
+func ListDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) DuplicatePodsMap {
 	pods, err := podutil.ListPodsOnANode(client, node)
 	if err != nil {
 		return nil
@@ -65,9 +78,9 @@ func RemoveDuplicatePodsOnANode(client clientset.Interface, node *v1.Node) Dupli
 	return FindDuplicatePods(pods)
 }
 
+// FindDuplicatePods takes a list of pods and returns a DuplicatePodsMap.
 func FindDuplicatePods(pods []*v1.Pod) DuplicatePodsMap {
 	dpm := DuplicatePodsMap{}
-
 	for _, pod := range pods {
 		sr, err := podutil.CreatorRef(pod)
 		if err != nil || sr == nil {
diff --git a/pkg/rescheduler/strategies/duplicates_test.go b/pkg/rescheduler/strategies/duplicates_test.go
new file mode 100644
index 000000000..3751c2909
--- /dev/null
+++ b/pkg/rescheduler/strategies/duplicates_test.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strategies
+
+import (
+	"fmt"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	core "k8s.io/client-go/testing"
+	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
+)
+
+// TODO:@ravisantoshgudimetla. As of now, some test pods are built here. This needs to
+// move to utils after the refactor.
+// buildTestPod creates a test pod with the given parameters.
+func buildTestPod(name string, cpu int64, memory int64, nodeName string) *v1.Pod {
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      name,
+			SelfLink:  fmt.Sprintf("/api/v1/namespaces/default/pods/%s", name),
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Resources: v1.ResourceRequirements{
+						Requests: v1.ResourceList{},
+					},
+				},
+			},
+			NodeName: nodeName,
+		},
+	}
+	if cpu >= 0 {
+		pod.Spec.Containers[0].Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
+	}
+	if memory >= 0 {
+		pod.Spec.Containers[0].Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
+	}
+
+	return pod
+}
+
+// buildTestNode creates a node with the specified capacity.
+func buildTestNode(name string, millicpu int64, mem int64) *v1.Node {
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:     name,
+			SelfLink: fmt.Sprintf("/api/v1/nodes/%s", name),
+			Labels:   map[string]string{},
+		},
+		Status: v1.NodeStatus{
+			Capacity: v1.ResourceList{
+				v1.ResourcePods:   *resource.NewQuantity(100, resource.DecimalSI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
+			},
+			Allocatable: v1.ResourceList{
+				v1.ResourcePods:   *resource.NewQuantity(100, resource.DecimalSI),
+				v1.ResourceCPU:    *resource.NewMilliQuantity(millicpu, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(mem, resource.DecimalSI),
+			},
+			Phase: v1.NodeRunning,
+			Conditions: []v1.NodeCondition{
+				{Type: v1.NodeReady, Status: v1.ConditionTrue},
+			},
+		},
+	}
+	return node
+}
+
+// getMirrorPodAnnotation returns the annotation needed for a mirror pod.
+func getMirrorPodAnnotation() map[string]string {
+	return map[string]string{
+		"kubernetes.io/created-by":    "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
+		"kubernetes.io/config.source": "api",
+		"kubernetes.io/config.mirror": "mirror",
+	}
+}
+
+// getNormalPodAnnotation returns the annotation needed for a normal pod. A normal pod is one without any references to a controller.
+func getNormalPodAnnotation() map[string]string {
+	return map[string]string{
+		"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
+	}
+}
+
+// getReplicaSetAnnotation returns the annotation needed for a replicaset pod.
+func getReplicaSetAnnotation() map[string]string {
+	return map[string]string{
+		"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"ReplicaSet\"}}",
+	}
+}
+
+// getDaemonSetAnnotation returns the annotation needed for a daemonset pod.
+func getDaemonSetAnnotation() map[string]string {
+	return map[string]string{
+		"kubernetes.io/created-by": "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"DaemonSet\"}}",
+	}
+}
+
+// getCriticalPodAnnotation returns the annotation needed for a critical pod.
+func getCriticalPodAnnotation() map[string]string {
+	return map[string]string{
+		"kubernetes.io/created-by":                   "{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Pod\"}}",
+		"scheduler.alpha.kubernetes.io/critical-pod": "",
+	}
+}
+
+//TODO:@ravisantoshgudimetla This could be made table driven.
+func TestFindDuplicatePods(t *testing.T) {
+	node := buildTestNode("n1", 2000, 3000)
+	p1 := buildTestPod("p1", 100, 0, node.Name)
+	p2 := buildTestPod("p2", 100, 0, node.Name)
+	p3 := buildTestPod("p3", 100, 0, node.Name)
+	p4 := buildTestPod("p4", 100, 0, node.Name)
+	p5 := buildTestPod("p5", 100, 0, node.Name)
+	p6 := buildTestPod("p6", 100, 0, node.Name)
+	p7 := buildTestPod("p7", 100, 0, node.Name)
+
+	// All but one of the following three pods will be evicted.
+	p1.Annotations = getReplicaSetAnnotation()
+	p2.Annotations = getReplicaSetAnnotation()
+	p3.Annotations = getReplicaSetAnnotation()
+
+	// The following 4 pods won't get evicted.
+	// A daemonset pod.
+	p4.Annotations = getDaemonSetAnnotation()
+	// A pod with local storage.
+	p5.Annotations = getNormalPodAnnotation()
+	p5.Spec.Volumes = []v1.Volume{
+		{
+			Name: "sample",
+			VolumeSource: v1.VolumeSource{
+				HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
+				EmptyDir: &v1.EmptyDirVolumeSource{
+					SizeLimit: *resource.NewQuantity(int64(10), resource.BinarySI)},
+			},
+		},
+	}
+	// A mirror pod.
+	p6.Annotations = getMirrorPodAnnotation()
+	// A critical pod.
+	p7.Namespace = "kube-system"
+	p7.Annotations = getCriticalPodAnnotation()
+	expectedEvictedPodCount := 2
+	fakeClient := &fake.Clientset{}
+	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
+		return true, &v1.PodList{Items: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7}}, nil
+	})
+	fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
+		return true, node, nil
+	})
+	podsEvicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node})
+	if podsEvicted != expectedEvictedPodCount {
+		t.Errorf("Unexpected number of pods evicted: got %v, expected %v", podsEvicted, expectedEvictedPodCount)
+	}
+
+}
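The TODO above TestFindDuplicatePods notes that the test could later be made table driven. Below is a minimal sketch of that shape, reusing the helpers introduced in this patch; it is not part of the patch, and the test name, case descriptions, and chosen cases are only illustrative. The expected counts follow the behavior exercised by the existing test: with three ReplicaSet duplicates all but one pod is evicted, and a single pod has no duplicates.

func TestDeleteDuplicatePodsTableDriven(t *testing.T) {
	node := buildTestNode("n1", 2000, 3000)

	// Three pods created by the same ReplicaSet on the same node are duplicates of each other.
	rs1 := buildTestPod("rs1", 100, 0, node.Name)
	rs1.Annotations = getReplicaSetAnnotation()
	rs2 := buildTestPod("rs2", 100, 0, node.Name)
	rs2.Annotations = getReplicaSetAnnotation()
	rs3 := buildTestPod("rs3", 100, 0, node.Name)
	rs3.Annotations = getReplicaSetAnnotation()

	tests := []struct {
		description     string
		pods            []v1.Pod
		expectedEvicted int
	}{
		{
			description:     "three replicaset duplicates, all but one evicted",
			pods:            []v1.Pod{*rs1, *rs2, *rs3},
			expectedEvicted: 2,
		},
		{
			description:     "single replicaset pod has no duplicates, nothing evicted",
			pods:            []v1.Pod{*rs1},
			expectedEvicted: 0,
		},
	}

	for _, test := range tests {
		// Capture the pod list for this case so the reactor returns the right pods.
		pods := test.pods
		fakeClient := &fake.Clientset{}
		fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.PodList{Items: pods}, nil
		})
		fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
			return true, node, nil
		})
		if evicted := deleteDuplicatePods(fakeClient, "v1", []*v1.Node{node}); evicted != test.expectedEvicted {
			t.Errorf("%s: expected %v pods evicted, got %v", test.description, test.expectedEvicted, evicted)
		}
	}
}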