mirror of https://github.com/kubernetes-sigs/descheduler.git

Migrate RemovePodsViolatingNodeTaints into a plugin

Jan Chaloupka
2022-06-15 11:26:17 +02:00
parent c699dd1ccc
commit 704f6d4496
9 changed files with 541 additions and 22 deletions


@@ -0,0 +1,45 @@
package fake
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
)
type HandleImpl struct {
ClientsetImpl clientset.Interface
GetPodsAssignedToNodeFuncImpl podutil.GetPodsAssignedToNodeFunc
SharedInformerFactoryImpl informers.SharedInformerFactory
EvictorFilterImpl *evictions.EvictorFilter
PodEvictorImpl *evictions.PodEvictor
}
var _ framework.Handle = &HandleImpl{}
func (hi *HandleImpl) ClientSet() clientset.Interface {
return hi.ClientsetImpl
}
func (hi *HandleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.GetPodsAssignedToNodeFuncImpl
}
func (hi *HandleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.SharedInformerFactoryImpl
}
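// Evictor returns the HandleImpl itself: the fake implements
// framework.Evictor (Filter, Evict, NodeLimitExceeded) alongside
// framework.Handle, so no separate evictor object is needed.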
func (hi *HandleImpl) Evictor() framework.Evictor {
return hi
}
func (hi *HandleImpl) Filter(pod *v1.Pod) bool {
return hi.EvictorFilterImpl.Filter(pod)
}
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
}
func (hi *HandleImpl) NodeLimitExceeded(node *v1.Node) bool {
return hi.PodEvictorImpl.NodeLimitExceeded(node)
}


@@ -0,0 +1,129 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package removepodsviolatingnodetaints
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/utils"
)
const PluginName = "RemovePodsViolatingNodeTaints"
// RemovePodsViolatingNodeTaints evicts pods that do not tolerate the NoSchedule taints on their node
type RemovePodsViolatingNodeTaints struct {
handle framework.Handle
args *componentconfig.RemovePodsViolatingNodeTaintsArgs
taintFilterFnc func(taint *v1.Taint) bool
podFilter podutil.FilterFunc
}
var _ framework.Plugin = &RemovePodsViolatingNodeTaints{}
var _ framework.DeschedulePlugin = &RemovePodsViolatingNodeTaints{}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
nodeTaintsArgs, ok := args.(*componentconfig.RemovePodsViolatingNodeTaintsArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingNodeTaintsArgs, got %T", args)
}
var includedNamespaces, excludedNamespaces sets.String
if nodeTaintsArgs.Namespaces != nil {
includedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Include...)
excludedNamespaces = sets.NewString(nodeTaintsArgs.Namespaces.Exclude...)
}
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(nodeTaintsArgs.LabelSelector).
BuildFilterFunc()
if err != nil {
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}
excludedTaints := sets.NewString(nodeTaintsArgs.ExcludedTaints...)
excludeTaint := func(taint *v1.Taint) bool {
// Exclude taints by key *or* key=value
return excludedTaints.Has(taint.Key) || (taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
}
taintFilterFnc := func(taint *v1.Taint) bool { return (taint.Effect == v1.TaintEffectNoSchedule) && !excludeTaint(taint) }
if nodeTaintsArgs.IncludePreferNoSchedule {
taintFilterFnc = func(taint *v1.Taint) bool {
return (taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectPreferNoSchedule) && !excludeTaint(taint)
}
}
return &RemovePodsViolatingNodeTaints{
handle: handle,
podFilter: podFilter,
args: nodeTaintsArgs,
taintFilterFnc: taintFilterFnc,
}, nil
}
// Name retrieves the plugin name
func (d *RemovePodsViolatingNodeTaints) Name() string {
return PluginName
}
// Deschedule extension point implementation for the plugin
func (d *RemovePodsViolatingNodeTaints) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
return &framework.Status{
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}
totalPods := len(pods)
for i := 0; i < totalPods; i++ {
if !utils.TolerationsTolerateTaintsWithFilter(
pods[i].Spec.Tolerations,
node.Spec.Taints,
d.taintFilterFnc,
) {
klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
if d.handle.Evictor().NodeLimitExceeded(node) {
// the per-node eviction limit was reached; move on to the next node
break
}
}
}
}
return nil
}
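
For reference, a standalone sketch (not part of this commit; the taint keys and values below are made up) of how the excludeTaint closure in New matches ExcludedTaints entries either by bare key or by key=value:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Hypothetical policy input: one bare key, one key=value pair.
	excludedTaints := sets.NewString("dedicated", "zone=infra")

	// Same matching rule as the excludeTaint closure in New.
	excludeTaint := func(taint *v1.Taint) bool {
		return excludedTaints.Has(taint.Key) ||
			(taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
	}

	taints := []v1.Taint{
		{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}, // excluded by bare key
		{Key: "zone", Value: "infra", Effect: v1.TaintEffectNoSchedule},    // excluded by key=value
		{Key: "zone", Value: "storage", Effect: v1.TaintEffectNoSchedule},  // still enforced
	}
	for i := range taints {
		fmt.Printf("%s=%s excluded: %v\n", taints[i].Key, taints[i].Value, excludeTaint(&taints[i]))
	}
}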


@@ -0,0 +1,497 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package removepodsviolatingnodetaints
import (
"context"
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework"
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
// The taint key and value are derived by appending the index to the given key/value.
func createNoScheduleTaint(key, value string, index int) v1.Taint {
return v1.Taint{
Key: key + fmt.Sprintf("%v", index),
Value: value + fmt.Sprintf("%v", index),
Effect: v1.TaintEffectNoSchedule,
}
}
func createPreferNoScheduleTaint(key, value string, index int) v1.Taint {
return v1.Taint{
Key: key + fmt.Sprintf("%v", index),
Value: value + fmt.Sprintf("%v", index),
Effect: v1.TaintEffectPreferNoSchedule,
}
}
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
taints := []v1.Taint{}
for _, index := range indices {
taints = append(taints, createNoScheduleTaint(key, value, index))
}
node.Spec.Taints = taints
return node
}
func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.TaintEffect) *v1.Pod {
if pod.Annotations == nil {
pod.Annotations = map[string]string{}
}
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: effect}}
return pod
}
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
})
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node5 := test.BuildTestNode("n5", 2000, 3000, 10, nil)
node5.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
node6 := test.BuildTestNode("n6", 1, 1, 1, nil)
node6.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p12 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
p12.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// The following 4 pods won't get evicted.
// A Critical Pod.
p7.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// A daemonset.
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p9.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
p10.Annotations = test.GetMirrorPodAnnotation()
p1 = addTolerationToPod(p1, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p3 = addTolerationToPod(p3, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
p12.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
p13 := test.BuildTestPod("p13", 100, 0, node5.Name, nil)
p13.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// node5 carries PreferNoSchedule:testTaint1=test1, so p13 gets a toleration
// for testTaint0=test0, which does not match, leaving the taint untolerated
p13 = addTolerationToPod(p13, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
var uint1 uint = 1
tests := []struct {
description string
nodes []*v1.Node
pods []*v1.Pod
evictLocalStoragePods bool
evictSystemCriticalPods bool
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
expectedEvictedPodCount uint
nodeFit bool
includePreferNoSchedule bool
excludedTaints []string
}{
{
description: "Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 1, //p2 gets evicted
},
{
description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p3, p4},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 1, //p4 gets evicted
},
{
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{p7, p8, p9, p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 0, //nothing is evicted
},
{
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{p7, p8, p9, p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: true,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 1, //p9 gets evicted
},
{
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p7, p8, p10, p11},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 1, //p11 gets evicted
},
{
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: true,
expectedEvictedPodCount: 2, //p2 and p7 are evicted
},
{
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true,
},
{
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
pods: []*v1.Pod{p1, p3, p12},
nodes: []*v1.Node{node1, node3},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true,
},
{
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node4},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 0, //p2 gets evicted
nodeFit: true,
},
{
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node5},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 0,
},
{
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node5},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
includePreferNoSchedule: true,
expectedEvictedPodCount: 1, // p13 gets evicted
},
{
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
excludedTaints: []string{"excludedTaint1", "testTaint1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as one of the specified excludedTaints matches the key of node1's taint
},
{
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
excludedTaints: []string{"testTaint1=test1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as both the key and value of the excluded taint match node1's taint
},
{
description: "The excluded taint matches the key of node1's taint, but does not match the value",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
excludedTaints: []string{"testTaint1=test2"},
expectedEvictedPodCount: 1, // pod gets evicted, as excluded taint value does not match node1's taint value
},
{
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node1, node6},
evictLocalStoragePods: false,
evictSystemCriticalPods: true,
expectedEvictedPodCount: 0, //p2 and p7 can't be evicted
nodeFit: true,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
)
handle := &frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
PodEvictorImpl: podEvictor,
EvictorFilterImpl: evictions.NewEvictorFilter(
tc.nodes,
getPodsAssignedToNode,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
false,
evictions.WithNodeFit(tc.nodeFit),
),
SharedInformerFactoryImpl: sharedInformerFactory,
}
plugin, err := New(&componentconfig.RemovePodsViolatingNodeTaintsArgs{
IncludePreferNoSchedule: tc.includePreferNoSchedule,
ExcludedTaints: tc.excludedTaints,
},
handle,
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
plugin.(framework.DeschedulePlugin).Deschedule(ctx, tc.nodes)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
})
}
}
func TestToleratesTaint(t *testing.T) {
testCases := []struct {
description string
toleration v1.Toleration
taint v1.Taint
expectTolerated bool
}{
{
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has no value, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key and effect, and operator is Exists, and taint has some value, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same effect, toleration has empty key and operator is Exists, means match all taints, expect tolerated",
toleration: v1.Toleration{
Key: "",
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key, effect and value, and operator is Equal, expect tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: true,
},
{
description: "toleration and taint have the same key and effect, but different values, and operator is Equal, expect not tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpEqual,
Value: "value1",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "value2",
Effect: v1.TaintEffectNoSchedule,
},
expectTolerated: false,
},
{
description: "toleration and taint have the same key and value, but different effects, and operator is Equal, expect not tolerated",
toleration: v1.Toleration{
Key: "foo",
Operator: v1.TolerationOpEqual,
Value: "bar",
Effect: v1.TaintEffectNoSchedule,
},
taint: v1.Taint{
Key: "foo",
Value: "bar",
Effect: v1.TaintEffectNoExecute,
},
expectTolerated: false,
},
}
for _, tc := range testCases {
if tolerated := tc.toleration.ToleratesTaint(&tc.taint); tc.expectTolerated != tolerated {
t.Errorf("[%s] expect %v, got %v: toleration %+v, taint %s", tc.description, tc.expectTolerated, tolerated, tc.toleration, tc.taint.ToString())
}
}
}

pkg/framework/types.go Normal file

@@ -0,0 +1,72 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)
// Handle provides plugins with access to a kubernetes client set, an
// evictor, a shared informer factory and other instruments shared
// across plugins.
type Handle interface {
// ClientSet returns a kubernetes clientSet.
ClientSet() clientset.Interface
Evictor() Evictor
GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc
SharedInformerFactory() informers.SharedInformerFactory
}
// Evictor defines an interface for filtering and evicting pods
// while abstracting away the specific pod evictor/evictor filter.
type Evictor interface {
// Filter checks if a pod can be evicted
Filter(*v1.Pod) bool
// Evict evicts a pod (no pre-check performed)
Evict(context.Context, *v1.Pod, evictions.EvictOptions) bool
// NodeLimitExceeded checks if the number of evictions for a node was exceeded
NodeLimitExceeded(node *v1.Node) bool
}
// Status describes result of an extension point invocation
type Status struct {
Err error
}
// Plugin is the parent type for all the descheduling framework plugins.
type Plugin interface {
Name() string
}
// DeschedulePlugin defines an extension point for a general descheduling operation
type DeschedulePlugin interface {
Plugin
Deschedule(ctx context.Context, nodes []*v1.Node) *Status
}
// BalancePlugin defines an extension point for balancing pods across a cluster
type BalancePlugin interface {
Plugin
Balance(ctx context.Context, nodes []*v1.Node) *Status
}
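
To make the contract concrete, here is a minimal sketch of a plugin satisfying these interfaces; it is not part of this commit, the package and plugin names are hypothetical, and a real plugin would list and evict pods the way RemovePodsViolatingNodeTaints does above:

package noop

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"sigs.k8s.io/descheduler/pkg/framework"
)

const PluginName = "NoopDeschedule" // hypothetical name

// Noop satisfies framework.DeschedulePlugin but never evicts anything.
type Noop struct {
	handle framework.Handle
}

var _ framework.DeschedulePlugin = &Noop{}

// New ignores its args; a real plugin type-asserts them, as New in
// RemovePodsViolatingNodeTaints does.
func New(args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
	return &Noop{handle: handle}, nil
}

func (n *Noop) Name() string { return PluginName }

// Deschedule is the extension point invocation; a real plugin would list pods
// per node via n.handle.GetPodsAssignedToNodeFunc() and evict through
// n.handle.Evictor(), returning a *framework.Status with Err set on failure.
func (n *Noop) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
	return nil
}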