mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 21:31:18 +01:00

Compare commits


290 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
7d2c31cd39 Merge pull request #1808 from ingvagabund/profile-instance-id
feat(profile): inject a plugin instance ID to each built plugin
2026-01-09 15:33:43 +05:30
Jan Chaloupka
cf9edca33c feat(profile): inject a plugin instance ID to each built plugin 2026-01-06 12:26:35 +01:00
Kubernetes Prow Robot
f164943257 Merge pull request #1807 from ingvagabund/docs
doc(Design Decisions FAQ): Why doesn't the framework provide helpers for registering and retrieving indexers for plugins
2026-01-05 21:20:39 +05:30
Jan Chaloupka
1fe9e2c345 doc(Design Decisions FAQ): Why doesn't the framework provide helpers for registering and retrieving indexers for plugins 2026-01-05 16:10:39 +01:00
Kubernetes Prow Robot
16ccff8ed8 Merge pull request #1806 from ingvagabund/profile-refactoring
refactor(pkg/framework/profile): dedup unit test code
2026-01-05 15:12:37 +05:30
Jan Chaloupka
38f0f15787 chore: make gen 2026-01-04 20:23:13 +01:00
Jan Chaloupka
52f2aea444 refactor(pkg/framework/profile): add registerDefaultEvictor helper function 2026-01-04 19:43:47 +01:00
Jan Chaloupka
f3c63011cc refactor(pkg/framework/profile): add fake plugin registration helpers 2026-01-04 19:43:29 +01:00
Jan Chaloupka
47b939dd86 refactor(pkg/framework/profile): build a profile through a shared function to reduce code duplication 2026-01-04 19:42:30 +01:00
Kubernetes Prow Robot
89c88f483b Merge pull request #1800 from ingvagabund/readnodes-unit-test
fix(TestReadyNodesWithNodeSelector): make sure nodeLister.List always returns a non-empty list so the lister is always tested
2025-12-26 20:48:31 +05:30
Kubernetes Prow Robot
d558fa3a5b Merge pull request #1804 from ingvagabund/refactorings
refactor(plugins): simplify the way pods are created
2025-12-21 14:38:30 -08:00
Jan Chaloupka
7ef3673d4c refactor: inline single-statement apply functions in BuildTestPod calls 2025-12-21 21:41:59 +01:00
Jan Chaloupka
988e0b8868 refactor: replace pod.Spec.Volumes with test.SetHostPathEmptyDirVolumeSource in plugin tests 2025-12-21 21:36:24 +01:00
Jan Chaloupka
fc8ae3b4e8 refactor: replace pod.Spec.Priority with test.SetPodPriority in plugin tests 2025-12-21 21:36:05 +01:00
Kubernetes Prow Robot
6d7fedc982 Merge pull request #1803 from ingvagabund/refactor-defaultevictir-test
refactor(TestDefaultEvictor): de-dup code and use helpers
2025-12-20 06:54:31 -08:00
Jan Chaloupka
769ded35f1 make fmt 2025-12-20 15:24:37 +01:00
Jan Chaloupka
3283635149 refactor(defaultevictor_test): move newProtectedStorageClassesConfig to package level
Move the newProtectedStorageClassesConfig helper function from local scope
to package level so it can be reused by both TestDefaultEvictorFilter and
Test_protectedPVCStorageClasses, eliminating code duplication.
2025-12-20 15:17:06 +01:00
Jan Chaloupka
994ce3e2f7 refactor(TestDefaultEvictor): add setPodPVCVolumeWithFooClaimName helper function 2025-12-20 14:36:10 +01:00
Jan Chaloupka
90e4115b78 refactor(TestDefaultEvictor): add setPodLocalStorage helper function 2025-12-20 14:29:19 +01:00
Jan Chaloupka
8913d79d14 refactor(TestDefaultEvictor): replace mirror pod annotation assignments with test.SetMirrorPodAnnotation 2025-12-20 14:28:08 +01:00
Jan Chaloupka
9a5d7e8286 refactor(TestDefaultEvictor): replace system critical priority assignments with test.SetPodPriority 2025-12-20 14:27:18 +01:00
Jan Chaloupka
674e463bc2 refactor(TestDefaultEvictor): replace manual owner reference assignments with test utilities 2025-12-20 14:25:58 +01:00
Jan Chaloupka
1df3ef5030 refactor(TestDefaultEvictor): add setPodEvictAnnotation helper function 2025-12-20 14:22:28 +01:00
Jan Chaloupka
3068f8431a refactor(TestDefaultEvictor): add setPodNodeSelector helper function 2025-12-20 14:11:46 +01:00
Jan Chaloupka
dfd2b95d2d refactor(TestDefaultEvictor): add setNodeLabel helper function 2025-12-20 14:08:11 +01:00
Jan Chaloupka
3bb4529c34 refactor(TestDefaultEvictor): use test.SetNormalOwnerRef 2025-12-20 14:00:40 +01:00
Jan Chaloupka
b8765bd8ee refactor(TestDefaultEvictor): add setNodeTaint helper function 2025-12-20 13:54:50 +01:00
Jan Chaloupka
d666e4b830 refactor(TestDefaultEvictor): add buildTestPod helper function 2025-12-20 13:43:00 +01:00
Jan Chaloupka
08f733863e refactor(TestDefaultEvictor): add buildTestNode helper function 2025-12-20 13:25:39 +01:00
Jan Chaloupka
60da931e0e fix(TestReadyNodesWithNodeSelector): make sure nodeLister.List always returns a non-empty list so the lister is always tested
The case of an empty list of nodes from the lister is not easy to catch.
This change makes sure one more initial condition is met.
2025-12-18 17:06:25 +01:00
Kubernetes Prow Robot
12a9db4da0 Merge pull request #1798 from ingvagabund/contriburing-descheduler
doc: introduce contributing guidelines specific to the project
2025-12-15 12:43:47 -08:00
Jan Chaloupka
41da7497c3 doc: introduce contributing guidelines specific to the project
The document is to be extended on the fly
2025-12-15 21:11:50 +01:00
Kubernetes Prow Robot
b56f3cdae9 Merge pull request #1797 from ingvagabund/nodetaint-unit-test
refactor(TestDeletePodsViolatingNodeTaints): inline object creation
2025-12-15 09:51:45 -08:00
Jan Chaloupka
162a2d14b7 refactor(TestRemovePodsHavingTooManyRestarts): remove leftover comments 2025-12-15 18:20:38 +01:00
Jan Chaloupka
78788d72de refactor(node_taint_test): inline p15 2025-12-15 18:19:04 +01:00
Jan Chaloupka
956eeefede refactor(node_taint_test): inline p14 2025-12-15 18:18:59 +01:00
Jan Chaloupka
1f7bd1fba9 refactor(node_taint_test): inline p13 2025-12-15 18:18:56 +01:00
Jan Chaloupka
5fdf368593 refactor(node_taint_test): inline p12 2025-12-15 18:18:51 +01:00
Jan Chaloupka
50b6e178c1 refactor(node_taint_test): inline p11 2025-12-15 18:18:47 +01:00
Jan Chaloupka
c1ad532c46 refactor(node_taint_test): inline p10 2025-12-15 18:18:43 +01:00
Jan Chaloupka
7e40aae2dc refactor(node_taint_test): inline p9 2025-12-15 18:18:39 +01:00
Jan Chaloupka
e09bd976f5 refactor(node_taint_test): inline p8 2025-12-15 18:18:35 +01:00
Jan Chaloupka
ffb1f44144 refactor(node_taint_test): inline p7 2025-12-15 18:18:24 +01:00
Jan Chaloupka
cb595f2524 refactor(node_taint_test): inline p6 2025-12-15 18:17:34 +01:00
Jan Chaloupka
c46817f6df refactor(node_taint_test): inline p5 2025-12-15 18:17:30 +01:00
Jan Chaloupka
032db38d6c refactor(node_taint_test): inline p4 2025-12-15 18:17:26 +01:00
Jan Chaloupka
c1cd3ae794 refactor(node_taint_test): inline p3 2025-12-15 18:17:21 +01:00
Jan Chaloupka
060d9c8573 refactor(node_taint_test): inline p2 2025-12-15 18:17:17 +01:00
Jan Chaloupka
51bcf60ccf refactor(node_taint_test): inline p1 2025-12-15 18:17:12 +01:00
Jan Chaloupka
b472549cf6 refactor(node_taint_test): add withKubeSystemCriticalPod helper 2025-12-15 18:16:58 +01:00
Jan Chaloupka
c68e8a6d06 refactor(node_taint_test): add withTestTaintXToleration1 helper 2025-12-15 18:16:19 +01:00
Jan Chaloupka
68d9d4d044 refactor(node_taint_test): add datacenter label constants 2025-12-15 18:16:14 +01:00
Jan Chaloupka
452b1ff7d9 refactor(node_taint_test): add SetPodVolumes and withLocalStorageVolume helpers 2025-12-15 18:16:08 +01:00
Jan Chaloupka
f123f78b44 refactor: add SetSystemCriticalPriority helper function 2025-12-15 18:15:31 +01:00
Jan Chaloupka
ca0f7535fb refactor: add SetMirrorPodAnnotation helper function 2025-12-15 17:41:46 +01:00
Jan Chaloupka
78ff3fe92a refactor(node_taint_test): add withTestTaintToleration1 helper 2025-12-15 17:37:45 +01:00
Jan Chaloupka
0269283185 refactor(node_taint_test): add buildTestPodWithNormalOwnerRef helper 2025-12-15 17:34:18 +01:00
Jan Chaloupka
57ed329feb refactor(node_taint_test): inline node7 2025-12-15 17:26:04 +01:00
Jan Chaloupka
b96a41a745 refactor(node_taint_test): inline node6 2025-12-15 17:25:27 +01:00
Jan Chaloupka
6b6f7ba5c7 refactor(node_taint_test): inline node5 2025-12-15 17:24:53 +01:00
Jan Chaloupka
a3ca65ea14 refactor(node_taint_test): inline node4 2025-12-15 17:24:17 +01:00
Jan Chaloupka
d81580c93e refactor(node_taint_test): inline node3 2025-12-15 17:23:42 +01:00
Jan Chaloupka
0f7ff8a2b7 refactor(node_taint_test): inline node2 2025-12-15 17:23:08 +01:00
Jan Chaloupka
d27afd0319 refactor(node_taint_test): inline node1 2025-12-15 17:21:25 +01:00
Jan Chaloupka
3d48efdff4 refactor(node_taint_test): add dedicated functions for remaining nodes 2025-12-15 17:14:06 +01:00
Jan Chaloupka
e5d5cf2229 refactor(node_taint_test): create dedicated functions for taint configurations 2025-12-15 17:11:54 +01:00
Jan Chaloupka
f65209d4fa refactor(node_taint_test): inline addTaintsToNode 2025-12-15 17:08:50 +01:00
Jan Chaloupka
b9ceb9144f refactor(node_taint_test): remove default false settings for evict flags 2025-12-15 17:01:43 +01:00
Jan Chaloupka
2bbec0cbc6 refactor(node_taint_test): apply pod single creation convention for p15 2025-12-15 16:55:21 +01:00
Jan Chaloupka
a363da9806 refactor(node_taint_test): apply pod single creation convention for p14 2025-12-15 16:55:02 +01:00
Jan Chaloupka
63b3bd3b4d refactor(node_taint_test): apply pod single creation convention for p13 2025-12-15 16:54:44 +01:00
Jan Chaloupka
7fb935c650 refactor(node_taint_test): replace GetNormalPodOwnerRefList with SetNormalOwnerRef 2025-12-15 16:52:42 +01:00
Kubernetes Prow Robot
f85b2f8d4d Merge pull request #1796 from ingvagabund/nodeaffinity-unit-test
refactor(TestRemovePodsViolatingNodeAffinity): inline object creation
2025-12-15 07:45:47 -08:00
Jan Chaloupka
0580b5942c refactor(node_taint_test): apply pod single creation convention for p12 2025-12-15 16:44:46 +01:00
Jan Chaloupka
4171af7e8a refactor(node_taint_test): apply pod single creation convention for p11 2025-12-15 16:44:19 +01:00
Jan Chaloupka
a1678cd464 refactor(node_taint_test): apply pod single creation convention for p10 2025-12-15 16:44:01 +01:00
Jan Chaloupka
2f90d1dd01 refactor(node_taint_test): apply pod single creation convention for p9 2025-12-15 16:43:32 +01:00
Jan Chaloupka
f0cda32b6e refactor(node_taint_test): apply pod single creation convention for p8 2025-12-15 16:43:01 +01:00
Jan Chaloupka
43523113ff refactor(node_taint_test): apply pod single creation convention for p7 2025-12-15 16:42:34 +01:00
Jan Chaloupka
1b7889f4a3 refactor(node_taint_test): apply pod single creation convention for p6 2025-12-15 16:42:00 +01:00
Jan Chaloupka
b86315f097 refactor(node_taint_test): apply pod single creation convention for p5 2025-12-15 16:41:39 +01:00
Jan Chaloupka
0d496dfc5d refactor(node_taint_test): apply pod single creation convention for p4 2025-12-15 16:41:19 +01:00
Jan Chaloupka
d6b35eaed6 refactor(node_taint_test): apply pod single creation convention for p3 2025-12-15 16:40:41 +01:00
Jan Chaloupka
dc18f9f330 refactor(node_taint_test): apply pod single creation convention for p2 2025-12-15 16:39:54 +01:00
Jan Chaloupka
39212419e6 refactor(node_taint_test): apply pod single creation convention for p1 2025-12-15 16:38:35 +01:00
Jan Chaloupka
64f77ce6ee refactor(node_taint_test): apply node single creation convention for node7 2025-12-15 16:35:38 +01:00
Jan Chaloupka
ca5326c5c4 refactor(node_taint_test): apply node single creation convention for node6 2025-12-15 16:35:12 +01:00
Jan Chaloupka
9cf075ffc4 refactor(node_taint_test): apply node single creation convention for node5 2025-12-15 16:34:47 +01:00
Jan Chaloupka
3325fe0b8b refactor(node_taint_test): apply node single creation convention for node2 2025-12-15 16:33:28 +01:00
Jan Chaloupka
6c41ebd8f3 refactor(node_taint_test): apply node single creation convention for node1 2025-12-15 16:33:12 +01:00
Jan Chaloupka
ba034d6e0e refactor(node_taint_test): add node name constants 2025-12-15 16:23:38 +01:00
Jan Chaloupka
3289554f90 refactor(node_taint_test): add buildTestPod helper function 2025-12-15 16:20:01 +01:00
Jan Chaloupka
72575c2f23 refactor(node_taint_test): add buildTestNode helper function 2025-12-15 16:17:49 +01:00
Jan Chaloupka
07616c3fc0 refactor(TestRemovePodsHavingTooManyRestarts): make fmt 2025-12-15 16:14:50 +01:00
Jan Chaloupka
cad120881f refactor(TestRemovePodsViolatingNodeAffinity): apply pod single creation convention 2025-12-15 16:02:37 +01:00
Jan Chaloupka
aec4416099 refactor(TestRemovePodsViolatingNodeAffinity): add buildUnschedulableNodeWithLabels function 2025-12-15 15:59:01 +01:00
Jan Chaloupka
7b9d5d2539 refactor(TestRemovePodsViolatingNodeAffinity): inline nodeWithoutLabels 2025-12-15 15:50:25 +01:00
Jan Chaloupka
9f7629136f refactor(TestRemovePodsViolatingNodeAffinity): inline nodeWithLabels 2025-12-15 15:48:00 +01:00
Jan Chaloupka
42d255fd95 refactor(TestRemovePodsViolatingNodeAffinity): update addPodsToNode to accept nodeName 2025-12-15 15:33:58 +01:00
Jan Chaloupka
183a138d82 refactor(TestRemovePodsViolatingNodeAffinity): add constants for node names 2025-12-15 15:28:03 +01:00
Kubernetes Prow Robot
f669c45892 Merge pull request #1795 from ingvagabund/podantiaffinity-unit-test
refactor(TestPodAntiAffinity): inline object creation
2025-12-15 05:53:45 -08:00
Jan Chaloupka
a2ffbc1261 refactor(TestRemovePodsViolatingNodeAffinity): apply unit test convention for podWithNodeAffinity 2025-12-15 14:35:05 +01:00
Jan Chaloupka
2cda1bd89d refactor(TestRemovePodsViolatingNodeAffinity): deduplicate pod creation with buildTestPod helper 2025-12-15 14:35:03 +01:00
Jan Chaloupka
691a1da43b refactor(TestRemovePodsViolatingNodeAffinity): apply unit test convention for unschedulableNodeWithLabels 2025-12-15 14:34:59 +01:00
Jan Chaloupka
8fe74c7a0c refactor(TestRemovePodsViolatingNodeAffinity): apply unit test convention for nodeWithLabels 2025-12-15 14:34:54 +01:00
Jan Chaloupka
102bd6a91d refactor(TestRemovePodsViolatingNodeAffinity): deduplicate node creation with buildTestNode helper 2025-12-15 14:34:44 +01:00
Jan Chaloupka
3d1e15bb82 refactor(TestPodAntiAffinity): apply gofumpt formatting 2025-12-15 14:21:32 +01:00
Jan Chaloupka
3c02d9029c refactor(TestPodAntiAffinity): inline p1, p2, p4 builders 2025-12-15 14:13:51 +01:00
Jan Chaloupka
57a3e610a7 refactor(TestPodAntiAffinity): deduplicate p1, p3, p4 builders 2025-12-15 14:06:05 +01:00
Jan Chaloupka
7cec27d467 refactor(TestPodAntiAffinity): inline nonEvictablePod 2025-12-15 13:56:37 +01:00
Jan Chaloupka
688b45011a refactor(TestPodAntiAffinity): inline p11 2025-12-15 13:56:07 +01:00
Jan Chaloupka
a96451030c refactor(TestPodAntiAffinity): inline p10 2025-12-15 13:55:36 +01:00
Jan Chaloupka
a4930ebc83 refactor(TestPodAntiAffinity): inline p9 2025-12-15 13:55:11 +01:00
Jan Chaloupka
ad872f8b77 refactor(TestPodAntiAffinity): inline p8 2025-12-15 13:54:40 +01:00
Jan Chaloupka
a0654df270 refactor(TestPodAntiAffinity): inline p7 2025-12-15 13:54:07 +01:00
Jan Chaloupka
03b5a9a967 refactor(TestPodAntiAffinity): inline p6 2025-12-15 13:53:43 +01:00
Jan Chaloupka
9f2d22c1f7 refactor(TestPodAntiAffinity): inline p5 2025-12-15 13:53:19 +01:00
Jan Chaloupka
cbe1c1e559 refactor(TestPodAntiAffinity): inline p4 2025-12-15 13:52:21 +01:00
Jan Chaloupka
87182c5e8f refactor(TestPodAntiAffinity): inline p3 2025-12-15 13:51:58 +01:00
Jan Chaloupka
2765e31048 refactor(TestPodAntiAffinity): inline p2 2025-12-15 13:51:00 +01:00
Jan Chaloupka
87f675a2cd refactor(TestPodAntiAffinity): inline p1 2025-12-15 13:48:01 +01:00
Jan Chaloupka
a400a66d51 refactor(TestPodAntiAffinity): create dedicated builders for p1-p4 and nonEvictablePod 2025-12-15 13:43:11 +01:00
Jan Chaloupka
fa427a2b37 refactor(TestPodAntiAffinity): deduplicate setting Labels for foo1-bar1 2025-12-15 13:38:58 +01:00
Jan Chaloupka
90672630da refactor(TestPodAntiAffinity): deduplicate setting Labels for foo-bar 2025-12-15 13:37:55 +01:00
Jan Chaloupka
6a00214457 refactor(TestPodAntiAffinity): deduplicate setPodAntiAffinity for foo1-bar1 2025-12-15 13:36:27 +01:00
Jan Chaloupka
9413b0c654 refactor(TestPodAntiAffinity): deduplicate setPodAntiAffinity for foo-bar 2025-12-15 13:34:05 +01:00
Jan Chaloupka
3072a59ea0 refactor(TestPodAntiAffinity): ensure nonEvictablePod is created only through apply argument 2025-12-15 13:25:04 +01:00
Jan Chaloupka
0e56823865 refactor(TestPodAntiAffinity): ensure p11 is created only through apply argument 2025-12-15 13:24:09 +01:00
Jan Chaloupka
ea80f7d307 refactor(TestPodAntiAffinity): ensure p10 is created only through apply argument 2025-12-15 13:23:46 +01:00
Jan Chaloupka
6638b976ad refactor(TestPodAntiAffinity): ensure p9 is created only through apply argument 2025-12-15 13:23:10 +01:00
Jan Chaloupka
116385718f refactor(TestPodAntiAffinity): ensure p8 is created only through apply argument 2025-12-15 13:22:35 +01:00
Jan Chaloupka
5ad695166a refactor(TestPodAntiAffinity): ensure p7 is created only through apply argument 2025-12-15 13:22:12 +01:00
Jan Chaloupka
d5e0ec597f refactor(TestPodAntiAffinity): ensure p6 is created only through apply argument 2025-12-15 13:21:32 +01:00
Jan Chaloupka
4b86cdd31a refactor(TestPodAntiAffinity): ensure p5 is created only through apply argument 2025-12-15 13:20:53 +01:00
Jan Chaloupka
99527292e0 refactor(TestPodAntiAffinity): ensure p4 is created only through apply argument 2025-12-15 13:20:06 +01:00
Jan Chaloupka
cf79af6fba refactor(TestPodAntiAffinity): ensure p3 is created only through apply argument 2025-12-15 13:19:38 +01:00
Jan Chaloupka
da55c779f2 refactor(TestPodAntiAffinity): ensure p2 is created only through apply argument 2025-12-15 13:18:50 +01:00
Kubernetes Prow Robot
bc6500d917 Merge pull request #1794 from ingvagabund/toomanyrestarts-unit-test
refactor(TestRemovePodsHavingTooManyRestarts): inline object creation
2025-12-15 03:59:45 -08:00
Jan Chaloupka
c5b9debe56 refactor(TestPodAntiAffinity): ensure p1 is created only through apply argument 2025-12-15 12:57:19 +01:00
Jan Chaloupka
18f847bbe8 refactor(TestPodAntiAffinity): create buildTestPodForNode1 to deduplicate nodeName1 2025-12-15 12:54:42 +01:00
Jan Chaloupka
6e753ac5fb refactor(TestPodAntiAffinity): create buildTestPod helper to deduplicate 100 and 0 literals 2025-12-15 12:52:02 +01:00
Jan Chaloupka
b797ca6ba2 refactor(TestPodAntiAffinity): inline node2, node3, node4, and node5 2025-12-15 12:48:50 +01:00
Jan Chaloupka
4ffabad669 refactor(TestPodAntiAffinity): create buildTestNode1 and inline node1 2025-12-15 12:45:07 +01:00
Jan Chaloupka
bba62ccb93 refactor(TestPodAntiAffinity): extract setNodeMainRegionLabel helper 2025-12-15 12:40:46 +01:00
Jan Chaloupka
1f856595f5 refactor(TestPodAntiAffinity): add nodeName constants
refactor(TestPodAntiAffinity): replace node.Name with nodeName constants
2025-12-15 12:36:23 +01:00
Jan Chaloupka
993162dd44 refactor(TestPodAntiAffinity): replace test.BuildTestNode with buildTestNode helper 2025-12-15 12:30:30 +01:00
Jan Chaloupka
ee73336fd8 refactor(TestRemovePodsHavingTooManyRestarts): inline node5 2025-12-15 12:25:03 +01:00
Jan Chaloupka
75f655e271 refactor(TestRemovePodsHavingTooManyRestarts): inline node4 2025-12-15 12:24:46 +01:00
Jan Chaloupka
76895273f9 refactor(TestRemovePodsHavingTooManyRestarts): inline node3 2025-12-15 12:11:09 +01:00
Jan Chaloupka
35d2103fcf refactor(TestRemovePodsHavingTooManyRestarts): inline node2 2025-12-15 12:10:34 +01:00
Jan Chaloupka
b069ae009a refactor(TestRemovePodsHavingTooManyRestarts): inline node1 2025-12-15 12:08:58 +01:00
Jan Chaloupka
be275deea5 refactor(TestRemovePodsHavingTooManyRestarts): node3 as a constant 2025-12-15 12:06:47 +01:00
Jan Chaloupka
a5d3241a54 refactor(TestRemovePodsHavingTooManyRestarts): replace test.BuildTestNode with buildTestNode helpers 2025-12-15 12:05:44 +01:00
Jan Chaloupka
2af9ea8449 refactor(TestRemovePodsHavingTooManyRestarts): remove applyFunc and apply modifications in initPods 2025-12-15 12:04:20 +01:00
Jan Chaloupka
60fa5aa228 refactor(TestRemovePodsHavingTooManyRestarts): create all the pods as part of a unit test definition 2025-12-15 12:04:10 +01:00
Jan Chaloupka
a94d22fd1b refactor(TestRemovePodsHavingTooManyRestarts): create all testing pods under initPods 2025-12-15 12:03:59 +01:00
Jan Chaloupka
8c70b02088 refactor(TestRemovePodsHavingTooManyRestarts): single testing pods creation 2025-12-15 12:03:22 +01:00
Jan Chaloupka
ec58fed521 refactor(TestRemovePodsHavingTooManyRestarts): create each init container through a single invocation 2025-12-15 12:03:08 +01:00
Jan Chaloupka
bf9cf0ee1c refactor(TestRemovePodsHavingTooManyRestarts): use test.Set...OwnerRef instead 2025-12-15 11:03:45 +01:00
Jan Chaloupka
6ebb0b7aa7 refactor(TestRemovePodsHavingTooManyRestarts): extract setPodContainerStatusRestartCount helper 2025-12-15 00:23:03 +01:00
Kubernetes Prow Robot
bb01360776 Merge pull request #1793 from ingvagabund/duplicates-unit-test
refactor(TestRemoveDuplicates): reduce test code duplication
2025-12-14 08:35:44 -08:00
Jan Chaloupka
c8bc668e04 refactor(TestRemoveDuplicatesUniformly): reduce duplication in setTolerations 2025-12-14 17:05:42 +01:00
Jan Chaloupka
b64426888b refactor(TestRemoveDuplicatesUniformly): reduce duplication in setWorkerLabelSelector 2025-12-14 17:05:21 +01:00
Jan Chaloupka
1306cf38a1 refactor(TestRemoveDuplicatesUniformly): reduce duplication in setNotMasterNodeSelector 2025-12-14 17:04:18 +01:00
Jan Chaloupka
bc06d1be83 refactor: replace test.BuildTestPod with buildTestPodForNode 2025-12-14 16:40:55 +01:00
Jan Chaloupka
c9e87bb97d Merge pull request #1792 from ingvagabund/duplicates-unit-test
refactor(TestFindDuplicatePods): reduce duplicates and inline
2025-12-14 17:27:11 +02:00
Jan Chaloupka
05b6d5e343 refactor(TestFindDuplicatePods): remove leftover comments 2025-12-14 16:16:24 +01:00
Jan Chaloupka
044f75dcec refactor(TestFindDuplicatePods): inline node6 creation 2025-12-14 16:16:22 +01:00
Jan Chaloupka
6e62af3dbf refactor(TestFindDuplicatePods): inline node5 creation 2025-12-14 16:16:20 +01:00
Jan Chaloupka
2fac727be3 refactor(TestFindDuplicatePods): inline node4 creation 2025-12-14 16:16:17 +01:00
Jan Chaloupka
babc4137a4 refactor(TestFindDuplicatePods): inline node3 creation 2025-12-14 16:16:12 +01:00
Jan Chaloupka
fc033caf21 refactor(TestFindDuplicatePods): inline node2 creation 2025-12-14 16:16:10 +01:00
Jan Chaloupka
fd524f2172 refactor(TestFindDuplicatePods): inline node1 creation 2025-12-14 16:07:27 +01:00
Jan Chaloupka
47275831ab refactor(TestFindDuplicatePods): apply buildTestNode helper to node variables
refactor(TestRemoveDuplicatesUniformly): apply buildTestNode helper
2025-12-14 16:00:50 +01:00
Jan Chaloupka
b8b0fa0565 refactor(TestFindDuplicatePods): inline p20 pod creation 2025-12-14 14:27:05 +01:00
Jan Chaloupka
daaa5896a9 refactor(TestFindDuplicatePods): inline p19 pod creation 2025-12-14 14:26:23 +01:00
Jan Chaloupka
e27864717d refactor(TestFindDuplicatePods): inline p17 pod creation 2025-12-14 14:25:26 +01:00
Jan Chaloupka
e8cf01591e refactor(TestFindDuplicatePods): inline p16 pod creation 2025-12-14 14:24:36 +01:00
Jan Chaloupka
d7766cccfd refactor(TestFindDuplicatePods): inline p15 pod creation 2025-12-14 14:22:22 +01:00
Jan Chaloupka
3ebffe5a86 refactor(TestFindDuplicatePods): inline p13 pod creation 2025-12-14 14:20:48 +01:00
Jan Chaloupka
4e758c18e8 refactor(TestFindDuplicatePods): inline p12 pod creation 2025-12-14 14:19:19 +01:00
Jan Chaloupka
1c494f9c44 refactor(TestFindDuplicatePods): inline p11 pod creation 2025-12-14 14:17:37 +01:00
Jan Chaloupka
45dfe3011c refactor(TestFindDuplicatePods): inline p10 pod creation 2025-12-14 14:11:13 +01:00
Jan Chaloupka
eeb459d6d4 refactor(TestFindDuplicatePods): inline p9 pod creation 2025-12-14 14:10:18 +01:00
Jan Chaloupka
f3d91fc69f refactor(TestFindDuplicatePods): inline p8 pod creation 2025-12-14 14:04:23 +01:00
Jan Chaloupka
e9dcd4e54d refactor(TestFindDuplicatePods): inline p7 pod creation 2025-12-14 14:03:22 +01:00
Jan Chaloupka
8490ed9c8f refactor(TestFindDuplicatePods): inline p6 pod creation 2025-12-14 14:01:21 +01:00
Jan Chaloupka
01fb826bd3 refactor(TestFindDuplicatePods): inline p5 pod creation 2025-12-14 13:58:38 +01:00
Jan Chaloupka
9b50aa91f8 refactor(TestFindDuplicatePods): inline p4 pod creation 2025-12-14 13:44:24 +01:00
Jan Chaloupka
7a5bf8c2f0 refactor(TestFindDuplicatePods): inline p3 pod creation 2025-12-14 13:43:34 +01:00
Jan Chaloupka
df06442830 refactor(TestFindDuplicatePods): inline p2 pod creation 2025-12-14 13:39:56 +01:00
Jan Chaloupka
180548cc1a refactor(TestFindDuplicatePods): inline p1 pod creation 2025-12-14 13:37:23 +01:00
Jan Chaloupka
0aee6cff48 refactor(TestFindDuplicatePods): introduce buildTestPodWithRSOwnerRefWithNamespaceForNode1 helper 2025-12-14 13:33:56 +01:00
Jan Chaloupka
7a0257a682 refactor(TestFindDuplicatePods): introduce buildTestPodWithRSOwnerRefForNode1 helper 2025-12-14 13:25:40 +01:00
Jan Chaloupka
f5253faeb0 refactor(TestFindDuplicatePods): set owner refs through a dedicated function 2025-12-14 13:25:34 +01:00
Jan Chaloupka
59f499e2cd refactor(TestFindDuplicatePods): replace direct ownerRef assignment with test.SetRSOwnerRef 2025-12-14 13:25:14 +01:00
Jan Chaloupka
008265db9b refactor(TestFindDuplicatePods): consolidate ownerRef as all the cases produce the same owner reference 2025-12-14 13:25:09 +01:00
Jan Chaloupka
61190b805b refactor(TestFindDuplicatePods): buildTestPodForNode1 for default testing configuration 2025-12-14 13:24:03 +01:00
Jan Chaloupka
8c83840bf9 Merge pull request #1791 from ingvagabund/duplicates-unit-test
refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits
2025-12-14 14:05:17 +02:00
Jan Chaloupka
e46b5db6d5 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:35 +01:00
Jan Chaloupka
b21fb4a655 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:35 +01:00
Jan Chaloupka
8f3c5f4978 refactor(TestFindDuplicatePods): drop unused variable 2025-12-14 12:49:27 +01:00
Jan Chaloupka
6f94e19385 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:27 +01:00
Jan Chaloupka
3bb99512d8 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:27 +01:00
Jan Chaloupka
56f49bc78f refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:27 +01:00
Jan Chaloupka
800dd280cd refactor(TestFindDuplicatePods): drop unused variable 2025-12-14 12:49:17 +01:00
Jan Chaloupka
8dada79593 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:46:05 +01:00
Jan Chaloupka
660e2dba40 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:46:03 +01:00
Jan Chaloupka
294ce39231 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:46:01 +01:00
Jan Chaloupka
f2031ddcb0 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:59 +01:00
Jan Chaloupka
7435b5d474 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:56 +01:00
Jan Chaloupka
b5f177efa0 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:54 +01:00
Jan Chaloupka
4a4ec4afb7 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:52 +01:00
Jan Chaloupka
0c33be962d refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:50 +01:00
Jan Chaloupka
511ed214b0 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:48 +01:00
Jan Chaloupka
3d4263bf5e refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:45 +01:00
Jan Chaloupka
96171413ba refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:43 +01:00
Jan Chaloupka
5578211253 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:38 +01:00
Jan Chaloupka
08c2fc7621 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:35 +01:00
Kubernetes Prow Robot
9e45259399 Merge pull request #1790 from ingvagabund/podlifetime-unit-tests
refactor(TestPodLifeTime): split the unit tests into smaller semantically close groups
2025-12-14 02:19:43 -08:00
Jan Chaloupka
e5bbedb602 refactor(TestPodLifeTime): extract generic filtering tests 2025-12-13 14:27:13 +01:00
Jan Chaloupka
2710fd3781 refactor(TestPodLifeTime): extract evictor configuration tests 2025-12-13 14:06:24 +01:00
Jan Chaloupka
2658864ac0 refactor(TestPodLifeTime): extract eviction limits tests 2025-12-13 14:06:18 +01:00
Jan Chaloupka
e05de87368 refactor(TestPodLifeTime): extract pod status reason tests 2025-12-13 14:06:10 +01:00
Jan Chaloupka
293a9ca4b7 refactor(TestPodLifeTime): extract container waiting reason tests 2025-12-13 14:06:04 +01:00
Jan Chaloupka
83151219e7 refactor(TestPodLifeTime): extract pod phase state tests 2025-12-13 14:05:18 +01:00
Jan Chaloupka
fb0bddf85d refactor(TestPodLifeTime): extract age threshold tests 2025-12-13 14:04:26 +01:00
Jan Chaloupka
286f2848fc refactor(TestPodLifeTime): add shared test infrastructure 2025-12-13 14:03:21 +01:00
Jan Chaloupka
d8d997a25d refactor(TestPodLifeTime): extract helper functions to package level 2025-12-13 13:58:26 +01:00
Kubernetes Prow Robot
5d7a483dc8 Merge pull request #1789 from ingvagabund/refactorings
feat(PodLifeTime): document the plugin with details that can be used for reasoning during reviews and design discussions
2025-12-12 04:54:12 -08:00
Jan Chaloupka
58076dd162 feat(PodLifeTime): document the plugin with details that can be used for
reasoning during reviews and design discussions
2025-12-12 11:55:27 +01:00
Kubernetes Prow Robot
b6e81fdd4b Merge pull request #1787 from ingvagabund/refactorings
feat(TestPodLifeTime): check only expected pods are evicted
2025-12-11 11:37:31 -08:00
Jan Chaloupka
59dfd041a8 feat(TestPodLifeTime): check only expected pods are evicted 2025-12-11 17:08:25 +01:00
Kubernetes Prow Robot
7bf29ce56d Merge pull request #1785 from ingvagabund/refactorings
refactor(TestPodLifeTime): update unit test names and simplify pod creation
2025-12-11 04:35:30 -08:00
Jan Chaloupka
c77f1a4ed2 refactor(TestPodLifeTime): update test names to better correspond to their purpose 2025-12-11 13:04:33 +01:00
Jan Chaloupka
7e14c6c7c4 refactor(TestPodLifeTime): drop applyPodsFunc function 2025-12-11 13:03:47 +01:00
Jan Chaloupka
b4a0b8dbac fix(TestPodLifeTime): rename dev ns to default 2025-12-11 13:03:40 +01:00
Kubernetes Prow Robot
680b10099d Merge pull request #1784 from ingvagabund/refactorings
refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates
2025-12-10 09:15:41 -08:00
Jan Chaloupka
e92dda1a37 Merge pull request #1783 from ingvagabund/refactorings
refactor(TestPodLifeTime): consolidations, simplifications and node instance for each unit test
2025-12-10 17:38:21 +02:00
Jan Chaloupka
07dc0c61c5 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:33:28 +01:00
Jan Chaloupka
cab310e55c refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:32:54 +01:00
Jan Chaloupka
822a1d4c40 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:32:30 +01:00
Jan Chaloupka
1d7368b58d refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:32:05 +01:00
Jan Chaloupka
70a71f54bc refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:31:31 +01:00
Jan Chaloupka
3ea0eadcb3 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:31:25 +01:00
Jan Chaloupka
41a0a9c994 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:30:30 +01:00
Jan Chaloupka
c707f53cec refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:28:20 +01:00
Jan Chaloupka
9be42e50cc refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:55:12 +01:00
Jan Chaloupka
bed39d70f0 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:54:04 +01:00
Jan Chaloupka
8a0fd10315 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:53:19 +01:00
Jan Chaloupka
5e6cd6057b refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:52:24 +01:00
Jan Chaloupka
b857869371 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:47:08 +01:00
Jan Chaloupka
3e764eb564 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:45:53 +01:00
Jan Chaloupka
2648749eb8 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:45:10 +01:00
Jan Chaloupka
ff43002060 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:43:07 +01:00
Jan Chaloupka
4f42a7ae9b refactor(TestPodLifeTime): consolidate defaults when building a test pod with RS owner reference 2025-12-10 15:34:48 +01:00
Jan Chaloupka
7d84b68556 refactor(TestPodLifeTime): consolidate defaults when building a test pod 2025-12-10 15:29:12 +01:00
Jan Chaloupka
5b4719634c refactor(TestPodLifeTime): the default pod namespace will work the same way as the 'dev' one 2025-12-10 15:29:03 +01:00
Jan Chaloupka
94a0fbdcbb refactor(TestPodLifeTime): inline node creation in each unit test to avoid accidental node spec updates 2025-12-10 15:28:57 +01:00
Jan Chaloupka
bbc3eef1c9 refactor(TestPodLifeTime): replace test.GetReplicaSetOwnerRefList with test.SetRSOwnerRef
To make the assignment shorter and unified
2025-12-10 15:28:51 +01:00
Jan Chaloupka
3a3e72e9c2 refactor(TestPodLifeTime): consolidate all owner references
test.GetReplicaSetOwnerRefList produces the same value every time it is invoked.
2025-12-10 15:28:45 +01:00
Kubernetes Prow Robot
e6c14a365f Merge pull request #1782 from ingvagabund/refactorings
refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits
2025-12-10 06:23:30 -08:00
Kubernetes Prow Robot
2b2ab0b9ad Merge pull request #1781 from ingvagabund/podlifetime-unittest-dry
refactor(TestPodLifeTime): remove ineffective owner references assignments
2025-12-10 05:39:30 -08:00
Jan Chaloupka
16b9311e9e refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:23 +01:00
Jan Chaloupka
1a61470e81 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:14 +01:00
Jan Chaloupka
c02779b6a5 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:09 +01:00
Jan Chaloupka
ff6363692c refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:04 +01:00
Jan Chaloupka
34540c3c95 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:00 +01:00
Jan Chaloupka
ee40f7ff30 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:55 +01:00
Jan Chaloupka
cece2ee3cc refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:50 +01:00
Jan Chaloupka
fbdf86fdfd refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:39 +01:00
Jan Chaloupka
7bfd4088ce refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:05 +01:00
Jan Chaloupka
18f61b5e64 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:11:12 +01:00
Jan Chaloupka
769b4fe34a refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:11:07 +01:00
Jan Chaloupka
6ffc7e3975 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:11:03 +01:00
Jan Chaloupka
31af0d8223 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:10:59 +01:00
Jan Chaloupka
0c80f3689d refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:10:53 +01:00
Jan Chaloupka
9722018847 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:10:48 +01:00
Jan Chaloupka
47cfdf7057 refactor(TestPodLifeTime): remove ineffective owner references assignments
Pods p5 and p6 already have an owner assigned. Also,
test.GetReplicaSetOwnerRefList() produces the same list of owner references.
2025-12-10 14:08:06 +01:00
Kubernetes Prow Robot
db6d460677 Merge pull request #1764 from abelfodil/master
Fix "Current requires cgo or $USER set in environment" error
2025-11-15 00:25:38 -08:00
Anes Belfodil
237d9c1a7b fix: provide USER env var to correctly initialize tracing
This is done to prevent "Current requires cgo or $USER set in environment" error during
tracing initialization.
2025-11-15 01:40:08 -05:00
Kubernetes Prow Robot
5b66733ada Merge pull request #1772 from Sycrosity/master
docs: fix README.md link to kubernetes bot commands
2025-11-07 20:52:51 -08:00
Kubernetes Prow Robot
eb1b91d085 Merge pull request #1773 from petersalas/update-readme
docs: fix incorrect gracePeriodSeconds default in README.md
2025-11-07 20:22:52 -08:00
Peter Salas
058056d965 docs: fix incorrect gracePeriodSeconds default in README.md 2025-11-04 12:11:02 -08:00
Sycrosity
f9aa969791 docs: fix link to kubernetes bot commands page 2025-11-03 00:29:17 +00:00
Sycrosity
4bbfa08dfb docs: Have kustomize suggested commands use latest release 2025-11-03 00:25:22 +00:00
Kubernetes Prow Robot
4b7c2c90ea Merge pull request #1771 from a7i/native-sidecar
fix: pod resource calculation to consider native sidecars
2025-11-02 02:06:02 -08:00
Amir Alavi
06cab8e2aa fix: pod resource calculation to consider native sidecars
previously, descheduler code had copied an old version of PodRequestsAndLimits which does not consider native sidecars
it will now rely on resourcehelper libs, which will continue to get upstream updates

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-11-01 10:48:00 -04:00
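To illustrate the rule this commit fixes, here is a simplified, CPU-only sketch (an editor's illustration, not the descheduler's or the upstream resourcehelper's actual code): init containers marked with `RestartPolicy: Always` are native sidecars that keep running for the pod's lifetime, so their requests must be added on top of the regular containers' requests rather than only being folded into the usual init-container rule.
```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podCPURequestWithSidecars adds native sidecar requests (init containers with
// RestartPolicy: Always) to the sum of regular container requests. It omits
// the max-with-ordinary-init-containers rule and pod overhead for brevity.
func podCPURequestWithSidecars(pod *v1.Pod) *resource.Quantity {
	total := resource.NewMilliQuantity(0, resource.DecimalSI)
	for _, c := range pod.Spec.Containers {
		total.Add(*c.Resources.Requests.Cpu())
	}
	for _, ic := range pod.Spec.InitContainers {
		if ic.RestartPolicy != nil && *ic.RestartPolicy == v1.ContainerRestartPolicyAlways {
			// A native sidecar runs for the pod's whole lifetime, so its
			// request counts toward the pod's steady-state footprint.
			total.Add(*ic.Resources.Requests.Cpu())
		}
	}
	return total
}
```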
Kubernetes Prow Robot
582641c2e9 Merge pull request #1752 from ricardomaraschini/create-protection-for-pods-using-storage-class
feat: enable pod protection based on storage classes
2025-10-30 14:18:03 -07:00
Ricardo Maraschini
d9d6ca64e9 feat: enable pod protection based on storage classes
this commit introduces a new customization on the existing PodsWithPVC
protection. this new customization allows users to make pods that refer
to a given storage class unevictable.

for example, to protect pods referring to `storage-class-0` and
`storage-class-1` this configuration can be used:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
  pluginConfig:
  - name: "DefaultEvictor"
    args:
      podProtections:
        extraEnabled:
        - PodsWithPVC
        config:
          PodsWithPVC:
            protectedStorageClasses:
            - name: storage-class-0
            - name: storage-class-1
```

changes introduced by this pr:

1. the descheduler starts to observe persistent volume claims.
1. a new api field was introduced to allow per pod protection config.
1. rbac had to be adjusted (+persistentvolumeclaims).
2025-10-29 11:21:10 +01:00
64 changed files with 4276 additions and 2351 deletions

View File

@@ -0,0 +1,30 @@
# Descheduler Design Constraints
This is a slowly growing document that lists good practices, conventions, and design decisions.
## Overview
TBD
## Code convention
* *formatting code*: run `make fmt` before committing each change to avoid CI failures
## Unit Test Conventions
These are the known conventions that are useful to practice whenever reasonable:
* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
* *no object instance duplication*: avoid creating two objects with the same passed values in two different places, e.g. two nodes created with the same memory, CPU, and pod requests. Instead, create a single function wrapping test.BuildTestNode and invoke this wrapper multiple times.
The aim is to reduce cognitive load when reading and debugging the test code.
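A minimal sketch of the single pod creation and no-duplication conventions, assuming the existing `test.BuildTestPod(name, cpu, memory, nodeName, apply)` and `test.SetRSOwnerRef` helpers; the wrapper below is an editor's illustration, not the repository's actual helper code:
```go
package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/test"
)

// buildTestPodForNode wraps test.BuildTestPod so that shared defaults live in
// exactly one place and each pod is mutated only through the apply argument.
func buildTestPodForNode(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
	return test.BuildTestPod(name, 100, 0, nodeName, func(pod *v1.Pod) {
		test.SetRSOwnerRef(pod) // default owner reference shared by the test file
		if apply != nil {
			apply(pod) // the only point where a unit test customizes the pod
		}
	})
}
```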
## Design Decisions FAQ
This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.
### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?
In general, each plugin can have many indexers—for example, for nodes, namespaces, pods, and other resources. Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.
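As a hedged sketch of how a plugin could use the profile instance ID to avoid indexer name collisions, assuming the framework handle exposes `SharedInformerFactory()` alongside the new `PluginInstanceID()`; the helper name, index name, and import path are illustrative, not framework API:
```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"

	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// registerPodsByNamespaceIndexer registers a pod indexer whose name is
// prefixed with the plugin instance ID, so two instances of the same profile
// can register "the same" indexer without colliding.
func registerPodsByNamespaceIndexer(handle frameworktypes.Handle) (string, error) {
	indexName := handle.PluginInstanceID() + "-pods-by-namespace"
	podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer()
	err := podInformer.AddIndexers(cache.Indexers{
		indexName: func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, nil
			}
			return []string{pod.Namespace}, nil
		},
	})
	return indexName, err
}
```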

View File

@@ -94,17 +94,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.34' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.34' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.34' | kubectl apply -f -
```
## User Guide
@@ -129,7 +129,7 @@ These are top level keys in the Descheduler Policy that you can use to configure
| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) |
| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
| `gracePeriodSeconds` | `int` | `nil` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. |
| `prometheus` |`object`| `nil` | Configures collection of Prometheus metrics for actual resource utilization |
| `prometheus.url` |`string`| `nil` | Points to a Prometheus server url |
| `prometheus.authToken` |`object`| `nil` | Sets Prometheus server authentication token. If not specified in cluster authentication token from the container's file system is read. |
@@ -189,6 +189,31 @@ The Default Evictor Plugin is used by default for filtering pods before processi
| `"PodsWithoutPDB"` | Prevents eviction of Pods without a PodDisruptionBudget (PDB). |
| `"PodsWithResourceClaims"` | Prevents eviction of Pods using ResourceClaims. |
#### Protecting pods using specific Storage Classes
With the `PodsWithPVC` protection enabled, all pods using PVCs are protected from eviction by default. If needed, you can restrict the protection by filtering on PVC storage class: only pods using PVCs with the specified storage classes are then protected from eviction. For example:
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
extraEnabled:
- PodsWithPVC
config:
PodsWithPVC:
protectedStorageClasses:
- name: storage-class-0
- name: storage-class-1
```
This example will protect pods using PVCs with storage classes `storage-class-0` and `storage-class-1` from eviction.
### Example policy
As part of the policy, you will start deciding which top level configuration to use, then which Evictor plugin to use (if you have your own, the Default Evictor if not), followed by deciding the configuration passed to the Evictor Plugin. By default, the Default Evictor is enabled for both `filter` and `preEvictionFilter` extension points. After that you will enable/disable eviction strategies plugins and configure them properly.
@@ -229,6 +254,7 @@ profiles:
#- "PodsWithPVC"
#- "PodsWithoutPDB"
#- "PodsWithResourceClaims"
config: {}
nodeFit: true
minReplicas: 2
plugins:
@@ -1152,7 +1178,7 @@ that the only people who can get things done around here are the "maintainers".
We also would love to add more "official" maintainers, so show us what you can
do!
This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
This repository uses the Kubernetes bots. See a full list of the commands [here](https://go.k8s.io/bot-commands).
### Communicating With Contributors

View File

@@ -35,6 +35,9 @@ rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["nodes", "pods"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "watch", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -164,7 +164,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), // Used by the defaultevictor plugin
) // Used by the defaultevictor plugin
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
@@ -414,7 +414,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()
var profileRunners []profileRunner
for _, profile := range d.deschedulerPolicy.Profiles {
for idx, profile := range d.deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
ctx,
profile,
@@ -425,6 +425,9 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
frameworkprofile.WithMetricsCollector(d.metricsCollector),
frameworkprofile.WithPrometheusClient(d.prometheusClient),
// Generate a unique instance ID using just the index to avoid long IDs
// when profile names are very long
frameworkprofile.WithProfileInstanceID(fmt.Sprintf("%d", idx)),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)

View File

@@ -241,7 +241,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
return false, fmt.Errorf("insufficient %v", resource)
}
}
// check pod num, at least one pod number is avaibalbe
// check pod num, at least one pod number is available
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
}

View File

@@ -25,9 +25,11 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
@@ -78,9 +80,24 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)
// First verify nodeLister returns non-empty list
allNodes, err := nodeLister.List(labels.Everything())
if err != nil {
t.Fatalf("Failed to list nodes from nodeLister: %v", err)
}
if len(allNodes) == 0 {
t.Fatal("Expected nodeLister to return non-empty list of nodes")
}
if len(allNodes) != 2 {
t.Errorf("Expected nodeLister to return 2 nodes, got %d", len(allNodes))
}
// Now test ReadyNodes
nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)
if nodes[0].Name != "node1" {
if len(nodes) != 1 {
t.Errorf("Expected 1 node, got %d", len(nodes))
} else if nodes[0].Name != "node1" {
t.Errorf("Expected node1, got %s", nodes[0].Name)
}
}
@@ -1020,6 +1037,64 @@ func TestNodeFit(t *testing.T) {
node: node,
podsOnNode: []*v1.Pod{},
},
{
description: "Pod with native sidecars with too much cpu does not fit on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
Resources: v1.ResourceRequirements{
Requests: createResourceList(100000, 100*1000*1000, 0),
},
})
}),
node: node,
podsOnNode: []*v1.Pod{},
err: errors.New("insufficient cpu"),
},
{
description: "Pod with native sidecars with too much memory does not fit on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
Resources: v1.ResourceRequirements{
Requests: createResourceList(100, 1000*1000*1000*1000, 0),
},
})
}),
node: node,
podsOnNode: []*v1.Pod{},
err: errors.New("insufficient memory"),
},
{
description: "Pod with small native sidecars fits on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
Resources: v1.ResourceRequirements{
Requests: createResourceList(100, 100*1000*1000, 0),
},
})
}),
node: node,
podsOnNode: []*v1.Pod{},
},
{
description: "Pod with large overhead does not fit on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
}),
node: node,
podsOnNode: []*v1.Pod{},
err: errors.New("insufficient cpu"),
},
{
description: "Pod with small overhead fits on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
}),
node: node,
podsOnNode: []*v1.Pod{},
},
}
for _, tc := range tests {

View File

@@ -23,6 +23,7 @@ type HandleImpl struct {
PodEvictorImpl *evictions.PodEvictor
MetricsCollectorImpl *metricscollector.MetricsCollector
PrometheusClientImpl promapi.Client
PluginInstanceIDImpl string
}
var _ frameworktypes.Handle = &HandleImpl{}
@@ -62,3 +63,7 @@ func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) error {
return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
}
func (hi *HandleImpl) PluginInstanceID() string {
return hi.PluginInstanceIDImpl
}

View File

@@ -73,6 +73,22 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
}
}
func NewPluginFncFromFakeWithReactor(fp *FakePlugin, callback func(ActionImpl)) pluginregistry.PluginBuilder {
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakePluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
}
fp.handle = handle
fp.args = fakePluginArgs
callback(ActionImpl{handle: fp.handle})
return fp, nil
}
}
// New builds plugin from its arguments while passing a handle
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakePluginArgs)
@@ -408,3 +424,55 @@ func (d *FakeFilterPlugin) handleBoolAction(action Action) bool {
}
panic(fmt.Errorf("unhandled %q action", action.GetExtensionPoint()))
}
// RegisterFakePlugin registers a FakePlugin with the given registry
func RegisterFakePlugin(name string, plugin *FakePlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewPluginFncFromFake(plugin),
&FakePlugin{},
&FakePluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
// RegisterFakeDeschedulePlugin registers a FakeDeschedulePlugin with the given registry
func RegisterFakeDeschedulePlugin(name string, plugin *FakeDeschedulePlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewFakeDeschedulePluginFncFromFake(plugin),
&FakeDeschedulePlugin{},
&FakeDeschedulePluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
// RegisterFakeBalancePlugin registers a FakeBalancePlugin with the given registry
func RegisterFakeBalancePlugin(name string, plugin *FakeBalancePlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewFakeBalancePluginFncFromFake(plugin),
&FakeBalancePlugin{},
&FakeBalancePluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
// RegisterFakeFilterPlugin registers a FakeFilterPlugin with the given registry
func RegisterFakeFilterPlugin(name string, plugin *FakeFilterPlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewFakeFilterPluginFncFromFake(plugin),
&FakeFilterPlugin{},
&FakeFilterPluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -17,11 +17,13 @@ import (
"context"
"errors"
"fmt"
"maps"
"slices"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
@@ -122,13 +124,67 @@ func applyEffectivePodProtections(d *DefaultEvictor, podProtections []PodProtect
applyFailedBarePodsProtection(d, protectionMap)
applyLocalStoragePodsProtection(d, protectionMap)
applyDaemonSetPodsProtection(d, protectionMap)
applyPvcPodsProtection(d, protectionMap)
applyPVCPodsProtection(d, protectionMap)
applyPodsWithoutPDBProtection(d, protectionMap, handle)
applyPodsWithResourceClaimsProtection(d, protectionMap)
return nil
}
// protectedPVCStorageClasses returns the list of storage classes that should
// be protected from eviction. If the list is empty or nil then all storage
// classes are protected (assuming PodsWithPVC protection is enabled).
func protectedPVCStorageClasses(d *DefaultEvictor) []ProtectedStorageClass {
protcfg := d.args.PodProtections.Config
if protcfg == nil {
return nil
}
scconfig := protcfg.PodsWithPVC
if scconfig == nil {
return nil
}
return scconfig.ProtectedStorageClasses
}
// podStorageClasses returns a list of storage classes referred by a pod. We
// need this when assessing if a pod should be protected because it refers to a
// protected storage class.
func podStorageClasses(inf informers.SharedInformerFactory, pod *v1.Pod) ([]string, error) {
lister := inf.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(
pod.Namespace,
)
referred := map[string]bool{}
for _, vol := range pod.Spec.Volumes {
if vol.PersistentVolumeClaim == nil {
continue
}
claim, err := lister.Get(vol.PersistentVolumeClaim.ClaimName)
if err != nil {
return nil, fmt.Errorf(
"failed to get persistent volume claim %q/%q: %w",
pod.Namespace, vol.PersistentVolumeClaim.ClaimName, err,
)
}
		// this should never happen: once a PVC is created with a nil
		// storageClass, the default storage class is automatically
		// assigned to it. By returning an error here we keep the pod
		// protected from eviction.
if claim.Spec.StorageClassName == nil || *claim.Spec.StorageClassName == "" {
return nil, fmt.Errorf(
"failed to resolve storage class for pod %q/%q",
pod.Namespace, claim.Name,
)
}
referred[*claim.Spec.StorageClassName] = true
}
return slices.Collect(maps.Keys(referred)), nil
}
func applyFailedBarePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
isProtectionEnabled := protectionMap[FailedBarePods]
if !isProtectionEnabled {
@@ -206,16 +262,50 @@ func applyDaemonSetPodsProtection(d *DefaultEvictor, protectionMap map[PodProtec
}
}
func applyPvcPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
isProtectionEnabled := protectionMap[PodsWithPVC]
if isProtectionEnabled {
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
return fmt.Errorf("pod with PVC is protected against eviction")
// applyPVCPodsProtection protects pods that refer to a PVC from eviction. If
// the user has specified a list of storage classes to protect then only pods
// referring to PVCs of those storage classes are protected.
func applyPVCPodsProtection(d *DefaultEvictor, enabledProtections map[PodProtection]bool) {
if !enabledProtections[PodsWithPVC] {
return
}
// if the user isn't filtering by storage classes we protect all pods
// referring to a PVC.
protected := protectedPVCStorageClasses(d)
if len(protected) == 0 {
d.constraints = append(
d.constraints,
func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
return fmt.Errorf("pod with PVC is protected against eviction")
}
return nil
},
)
return
}
protectedsc := map[string]bool{}
for _, class := range protected {
protectedsc[class.Name] = true
}
d.constraints = append(
d.constraints, func(pod *v1.Pod) error {
classes, err := podStorageClasses(d.handle.SharedInformerFactory(), pod)
if err != nil {
return err
}
for _, class := range classes {
if !protectedsc[class] {
continue
}
return fmt.Errorf("pod using protected storage class %q", class)
}
return nil
})
}
},
)
}
func applyPodsWithoutPDBProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) {

File diff suppressed because it is too large

View File

@@ -75,6 +75,37 @@ type PodProtections struct {
// DefaultDisabled specifies which default protection policies should be disabled.
// Supports: PodsWithLocalStorage, DaemonSetPods, SystemCriticalPods, FailedBarePods
DefaultDisabled []PodProtection `json:"defaultDisabled,omitempty"`
// Config holds configuration for pod protection policies. Depending on
// the enabled policies this may be required. For instance, when
// enabling the PodsWithPVC policy the user may specify which storage
// classes should be protected.
Config *PodProtectionsConfig `json:"config,omitempty"`
}
// PodProtectionsConfig holds configuration for pod protection policies. The
// name of the fields here must be equal to a protection name. This struct is
// meant to be extended as more protection policies are added.
// +k8s:deepcopy-gen=true
type PodProtectionsConfig struct {
PodsWithPVC *PodsWithPVCConfig `json:"PodsWithPVC,omitempty"`
}
// PodsWithPVCConfig holds configuration for the PodsWithPVC protection.
// +k8s:deepcopy-gen=true
type PodsWithPVCConfig struct {
// ProtectedStorageClasses is a list of storage classes that we want to
// protect. i.e. if a pod refers to one of these storage classes it is
// protected from being evicted. If none is provided then all pods with
// PVCs are protected from eviction.
ProtectedStorageClasses []ProtectedStorageClass `json:"protectedStorageClasses,omitempty"`
}
// ProtectedStorageClass is used to determine what storage classes are
// protected when the PodsWithPVC protection is enabled. This object exists
// so we can later on extend it with more configuration if needed.
type ProtectedStorageClass struct {
Name string `json:"name"`
}
// defaultPodProtections holds the list of protection policies that are enabled by default.
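A minimal DeschedulerPolicy sketch for this configuration; the `podProtections` and `extraEnabled` field names are assumptions based on the surrounding API (their JSON tags are not shown in this diff), and the storage class names are illustrative. With such a policy, only pods whose PVCs use one of the listed storage classes are protected; pods referencing other classes remain evictable:

```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
  - name: default
    pluginConfig:
      - name: DefaultEvictor
        args:
          podProtections:            # assumed field name, not shown in this diff
            extraEnabled:
              - PodsWithPVC
            config:
              PodsWithPVC:           # JSON tag is capitalized, per the type definition above
                protectedStorageClasses:
                  - name: protected-storage-class-0   # illustrative names
                  - name: protected-storage-class-1
```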

View File

@@ -72,6 +72,17 @@ func ValidateDefaultEvictorArgs(obj runtime.Object) error {
if hasDuplicates(args.PodProtections.ExtraEnabled) {
allErrs = append(allErrs, fmt.Errorf("PodProtections.ExtraEnabled contains duplicate entries"))
}
if slices.Contains(args.PodProtections.ExtraEnabled, PodsWithPVC) {
if args.PodProtections.Config != nil && args.PodProtections.Config.PodsWithPVC != nil {
protectedsc := args.PodProtections.Config.PodsWithPVC.ProtectedStorageClasses
for i, sc := range protectedsc {
if sc.Name == "" {
allErrs = append(allErrs, fmt.Errorf("PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[%d] name cannot be empty", i))
}
}
}
}
}
return utilerrors.NewAggregate(allErrs)

View File

@@ -198,6 +198,33 @@ func TestValidateDefaultEvictorArgs(t *testing.T) {
},
errInfo: fmt.Errorf(`[noEvictionPolicy accepts only ["Preferred" "Mandatory"] values, invalid pod protection policy in DefaultDisabled: "PodsWithoutPDB". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods], PodProtections.DefaultDisabled contains duplicate entries, PodProtections.ExtraEnabled contains duplicate entries]`),
},
{
name: "Protected storage classes without storage class name",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{PodsWithPVC},
Config: &PodProtectionsConfig{
PodsWithPVC: &PodsWithPVCConfig{
ProtectedStorageClasses: []ProtectedStorageClass{
{
Name: "",
},
{
Name: "protected-storage-class-0",
},
{
Name: "",
},
{
Name: "protected-storage-class-1",
},
},
},
},
},
},
errInfo: fmt.Errorf(`[PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[0] name cannot be empty, PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[2] name cannot be empty]`),
},
}
for _, testCase := range tests {

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -81,6 +81,11 @@ func (in *PodProtections) DeepCopyInto(out *PodProtections) {
*out = make([]PodProtection, len(*in))
copy(*out, *in)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(PodProtectionsConfig)
(*in).DeepCopyInto(*out)
}
return
}
@@ -93,3 +98,45 @@ func (in *PodProtections) DeepCopy() *PodProtections {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodProtectionsConfig) DeepCopyInto(out *PodProtectionsConfig) {
*out = *in
if in.PodsWithPVC != nil {
in, out := &in.PodsWithPVC, &out.PodsWithPVC
*out = new(PodsWithPVCConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtectionsConfig.
func (in *PodProtectionsConfig) DeepCopy() *PodProtectionsConfig {
if in == nil {
return nil
}
out := new(PodProtectionsConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsWithPVCConfig) DeepCopyInto(out *PodsWithPVCConfig) {
*out = *in
if in.ProtectedStorageClasses != nil {
in, out := &in.ProtectedStorageClasses, &out.ProtectedStorageClasses
*out = make([]ProtectedStorageClass, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsWithPVCConfig.
func (in *PodsWithPVCConfig) DeepCopy() *PodsWithPVCConfig {
if in == nil {
return nil
}
out := new(PodsWithPVCConfig)
in.DeepCopyInto(out)
return out
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
@@ -97,17 +96,7 @@ func TestHighNodeUtilization(t *testing.T) {
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -115,8 +104,7 @@ func TestHighNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
@@ -168,8 +156,7 @@ func TestHighNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
@@ -249,9 +236,7 @@ func TestHighNodeUtilization(t *testing.T) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
@@ -466,9 +451,7 @@ func TestHighNodeUtilization(t *testing.T) {
// pods in the other nodes must not be evicted
// because they do not have the extended
// resource defined in their requests.
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),

View File

@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -92,25 +91,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -155,25 +143,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -233,25 +210,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -310,17 +276,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -328,8 +284,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -379,17 +334,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -397,8 +342,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -462,17 +406,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -480,8 +414,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -525,9 +458,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
@@ -537,23 +468,11 @@ func TestLowNodeUtilization(t *testing.T) {
test.MakeBestEffortPod(pod)
}),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
}),
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -561,8 +480,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -632,17 +550,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -651,8 +559,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetNormalOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
@@ -747,12 +654,8 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
}),
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p3", 0, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p8", 0, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
@@ -795,17 +698,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -813,8 +706,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
@@ -872,17 +764,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -890,8 +772,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
@@ -975,17 +856,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -993,8 +864,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1037,17 +907,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -1055,8 +915,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1106,17 +965,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -1124,8 +973,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
},
@@ -1218,17 +1066,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -1236,8 +1074,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1282,25 +1119,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1575,17 +1401,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
func withLocalStorage(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}
@@ -1594,8 +1410,7 @@ func withCriticalPod(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}
func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -0,0 +1,155 @@
# PodLifeTime Plugin
## What It Does
The PodLifeTime plugin evicts pods that have been running for too long. You configure a maximum age threshold; pods older than that threshold are evicted, oldest first.
## How It Works
The plugin examines all pods across your nodes and selects those that exceed the configured age threshold. You can further narrow down which pods are considered by specifying:
- Which namespaces to include or exclude
- Which labels pods must have
- Which states pods must be in (e.g., Running, Pending, CrashLoopBackOff)
Once pods are selected, they are sorted by age (oldest first) and evicted in that order. Eviction stops when limits are reached (per-node limits, total limits, or Pod Disruption Budget constraints).
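For example, a sketch that combines all three narrowing criteria (the namespace, label, and threshold values are illustrative):
```yaml
args:
  maxPodLifeTimeSeconds: 86400 # 1 day
  namespaces:
    exclude: [kube-system]
  labelSelector:
    matchLabels:
      app: demo # illustrative label
  states: [Running]
```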
## Use Cases
- **Resource Leakage Mitigation**: Restart long-running pods that may have accumulated memory leaks, stale cache, or resource leaks
```yaml
args:
maxPodLifeTimeSeconds: 604800 # 7 days
states: [Running]
```
- **Ephemeral Workload Cleanup**: Remove long-running batch jobs, test pods, or temporary workloads that have exceeded their expected lifetime
```yaml
args:
maxPodLifeTimeSeconds: 7200 # 2 hours
states: [Succeeded, Failed]
```
- **Node Hygiene**: Remove forgotten or stuck pods that are consuming resources but not making progress
```yaml
args:
maxPodLifeTimeSeconds: 3600 # 1 hour
states: [CrashLoopBackOff, ImagePullBackOff, ErrImagePull]
includingInitContainers: true
```
- **Config/Secret Update Pickup**: Force pod restart to pick up updated ConfigMaps, Secrets, or environment variables
```yaml
args:
maxPodLifeTimeSeconds: 86400 # 1 day
states: [Running]
labelSelector:
matchLabels:
config-refresh: enabled
```
- **Security Rotation**: Periodically refresh pods to pick up new security tokens, certificates, or patched container images
```yaml
args:
maxPodLifeTimeSeconds: 259200 # 3 days
states: [Running]
namespaces:
exclude: [kube-system]
```
- **Dev/Test Environment Cleanup**: Automatically clean up old pods in development or staging namespaces
```yaml
args:
maxPodLifeTimeSeconds: 86400 # 1 day
namespaces:
include: [dev, staging, test]
```
- **Cluster Health Freshness**: Ensure pods periodically restart to maintain cluster health and verify workloads can recover from restarts
```yaml
args:
maxPodLifeTimeSeconds: 604800 # 7 days
states: [Running]
namespaces:
exclude: [kube-system, production]
```
- **Rebalancing Assistance**: Work alongside other descheduler strategies by removing old pods to allow better pod distribution
```yaml
args:
maxPodLifeTimeSeconds: 1209600 # 14 days
states: [Running]
```
- **Non-Critical Stateful Refresh**: Occasionally reset stateful workloads that can tolerate data loss or have external backup mechanisms
```yaml
args:
maxPodLifeTimeSeconds: 2592000 # 30 days
labelSelector:
matchLabels:
stateful-tier: cache
```
## Configuration
| Parameter | Description | Type | Required | Default |
|-----------|-------------|------|----------|---------|
| `maxPodLifeTimeSeconds` | Pods older than this many seconds are evicted | `uint` | Yes | - |
| `namespaces` | Limit eviction to specific namespaces (or exclude specific namespaces) | `Namespaces` | No | `nil` |
| `labelSelector` | Only evict pods matching these labels | `metav1.LabelSelector` | No | `nil` |
| `states` | Only evict pods in specific states (e.g., Running, CrashLoopBackOff) | `[]string` | No | `nil` |
| `includingInitContainers` | When checking states, also check init container states | `bool` | No | `false` |
| `includingEphemeralContainers` | When checking states, also check ephemeral container states | `bool` | No | `false` |
### Discovering states
The plugin checks the following locations on each pod to discover its relevant state:
1. **Pod Phase** - The overall pod lifecycle phase:
- `Running` - Pod is running on a node
- `Pending` - Pod has been accepted but containers are not yet running
- `Succeeded` - All containers terminated successfully
- `Failed` - All containers terminated, at least one failed
- `Unknown` - Pod state cannot be determined
2. **Pod Status Reason** - Why the pod is in its current state:
- `NodeAffinity` - Pod cannot be scheduled due to node affinity rules
- `NodeLost` - Node hosting the pod is lost
- `Shutdown` - Pod terminated due to node shutdown
- `UnexpectedAdmissionError` - Pod admission failed unexpectedly
3. **Container Waiting Reason** - Why containers are waiting to start:
- `PodInitializing` - Pod is still initializing
- `ContainerCreating` - Container is being created
- `ImagePullBackOff` - Image pull is failing and backing off
- `CrashLoopBackOff` - Container is crashing repeatedly
- `CreateContainerConfigError` - Container configuration is invalid
- `ErrImagePull` - Image cannot be pulled
- `CreateContainerError` - Container creation failed
- `InvalidImageName` - Image name is invalid
By default, only regular containers are checked. Enable `includingInitContainers` or `includingEphemeralContainers` to also check those container types.
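For instance, a sketch that also inspects init and ephemeral container states (the threshold and states are illustrative):
```yaml
args:
  maxPodLifeTimeSeconds: 3600 # 1 hour
  states: [CrashLoopBackOff, ImagePullBackOff, ErrImagePull]
  includingInitContainers: true
  includingEphemeralContainers: true
```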
## Example
```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
- name: default
plugins:
deschedule:
enabled:
- name: PodLifeTime
pluginConfig:
- name: PodLifeTime
args:
maxPodLifeTimeSeconds: 86400 # 1 day
namespaces:
include:
- default
states:
- Running
```
This configuration evicts Running pods in the `default` namespace that are older than 1 day.

File diff suppressed because it is too large

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -21,7 +21,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -33,8 +32,25 @@ import (
"sigs.k8s.io/descheduler/test"
)
func buildTestPodWithImage(podName, node, image string) *v1.Pod {
pod := test.BuildTestPod(podName, 100, 0, node, test.SetRSOwnerRef)
const (
nodeName1 = "n1"
nodeName2 = "n2"
nodeName3 = "n3"
nodeName4 = "n4"
nodeName5 = "n5"
nodeName6 = "n6"
)
func buildTestNode(nodeName string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(nodeName, 2000, 3000, 10, apply)
}
func buildTestPodForNode(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}
func buildTestPodWithImage(podName, image string) *v1.Pod {
pod := buildTestPodForNode(podName, nodeName1, test.SetRSOwnerRef)
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: image,
Image: image,
@@ -42,144 +58,25 @@ func buildTestPodWithImage(podName, node, image string) *v1.Pod {
return pod
}
func buildTestPodWithRSOwnerRefForNode1(name string, apply func(*v1.Pod)) *v1.Pod {
return buildTestPodForNode(name, nodeName1, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if apply != nil {
apply(pod)
}
})
}
func buildTestPodWithRSOwnerRefWithNamespaceForNode1(name, namespace string, apply func(*v1.Pod)) *v1.Pod {
return buildTestPodWithRSOwnerRefForNode1(name, func(pod *v1.Pod) {
pod.Namespace = namespace
if apply != nil {
apply(pod)
}
})
}
func TestFindDuplicatePods(t *testing.T) {
// first setup pods
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
})
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
})
node5 := test.BuildTestNode("n5", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node6 := test.BuildTestNode("n6", 200, 200, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p3.Namespace = "dev"
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p7.Namespace = "kube-system"
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p8.Namespace = "test"
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p9.Namespace = "test"
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p10.Namespace = "test"
p11 := test.BuildTestPod("p11", 100, 0, node1.Name, nil)
p11.Namespace = "different-images"
p12 := test.BuildTestPod("p12", 100, 0, node1.Name, nil)
p12.Namespace = "different-images"
p13 := test.BuildTestPod("p13", 100, 0, node1.Name, nil)
p13.Namespace = "different-images"
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
p14.Namespace = "different-images"
p15 := test.BuildTestPod("p15", 100, 0, node1.Name, nil)
p15.Namespace = "node-fit"
p16 := test.BuildTestPod("NOT1", 100, 0, node1.Name, nil)
p16.Namespace = "node-fit"
p17 := test.BuildTestPod("NOT2", 100, 0, node1.Name, nil)
p17.Namespace = "node-fit"
p18 := test.BuildTestPod("TARGET", 100, 0, node1.Name, nil)
p18.Namespace = "node-fit"
// This pod sits on node6 and is used to take up CPU requests on the node
p19 := test.BuildTestPod("CPU-eater", 150, 150, node6.Name, nil)
p19.Namespace = "test"
// Dummy pod for node6 used to do the opposite of p19
p20 := test.BuildTestPod("CPU-saver", 100, 150, node6.Name, nil)
p20.Namespace = "test"
// ### Evictable Pods ###
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
p3.ObjectMeta.OwnerReferences = ownerRef1
// Three Pods in the "test" Namespace, bound to same ReplicaSet. 2 should be evicted.
ownerRef2 := test.GetReplicaSetOwnerRefList()
p8.ObjectMeta.OwnerReferences = ownerRef2
p9.ObjectMeta.OwnerReferences = ownerRef2
p10.ObjectMeta.OwnerReferences = ownerRef2
// ### Non-evictable Pods ###
// A DaemonSet.
p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A Pod with local storage.
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
p6.Annotations = test.GetMirrorPodAnnotation()
// A Critical Pod.
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
// Same owners, but different images
p11.Spec.Containers[0].Image = "foo"
p11.ObjectMeta.OwnerReferences = ownerRef1
p12.Spec.Containers[0].Image = "bar"
p12.ObjectMeta.OwnerReferences = ownerRef1
// Multiple containers
p13.ObjectMeta.OwnerReferences = ownerRef1
p13.Spec.Containers = append(p13.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})
// ### Pods Evictable Based On Node Fit ###
ownerRef3 := test.GetReplicaSetOwnerRefList()
p15.ObjectMeta.OwnerReferences = ownerRef3
p16.ObjectMeta.OwnerReferences = ownerRef3
p17.ObjectMeta.OwnerReferences = ownerRef3
p18.ObjectMeta.OwnerReferences = ownerRef3
p15.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
p16.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
p17.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
testCases := []struct {
description string
pods []*v1.Pod
@@ -189,92 +86,263 @@ func TestFindDuplicatePods(t *testing.T) {
nodefit bool
}{
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2},
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2},
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
excludeOwnerKinds: []string{"ReplicaSet"},
},
{
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{p8, p9, p10},
nodes: []*v1.Node{node1, node2},
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p8", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p9", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p10", "test", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p8, p9, p10},
nodes: []*v1.Node{node1, node2},
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p8", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p9", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p10", "test", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 2,
},
{
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
pods: []*v1.Pod{p4, p5, p6, p7},
nodes: []*v1.Node{node1, node2},
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
pods: []*v1.Pod{
buildTestPodForNode("p4", nodeName1, test.SetDSOwnerRef),
buildTestPodForNode("p5", nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
}),
buildTestPodForNode("p6", nodeName1, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
}),
buildTestPodForNode("p7", nodeName1, func(pod *v1.Pod) {
pod.Namespace = "kube-system"
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Test all Pods: 4 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p4, p5, p6, p7, p8, p9, p10},
nodes: []*v1.Node{node1, node2},
description: "Test all Pods: 4 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
buildTestPodForNode("p4", nodeName1, test.SetDSOwnerRef),
buildTestPodForNode("p5", nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
}),
buildTestPodForNode("p6", nodeName1, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
}),
buildTestPodForNode("p7", nodeName1, func(pod *v1.Pod) {
pod.Namespace = "kube-system"
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p8", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p9", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p10", "test", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 2,
},
{
description: "Pods with the same owner but different images should not be evicted",
pods: []*v1.Pod{p11, p12},
nodes: []*v1.Node{node1, node2},
description: "Pods with the same owner but different images should not be evicted",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p11", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "foo"
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p12", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "bar"
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Pods with multiple containers should not match themselves",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node1, node2},
description: "Pods with multiple containers should not match themselves",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p13", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
pods: []*v1.Pod{p11, p13},
nodes: []*v1.Node{node1, node2},
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p11", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "foo"
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p13", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node3},
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p15, p16, p17},
nodes: []*v1.Node{node1, node4},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p15", "node-fit", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("NOT1", "node-fit", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("NOT2", "node-fit", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName4, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
}),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node5},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p19},
nodes: []*v1.Node{node1, node6},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
test.BuildTestPod("CPU-eater", 150, 150, nodeName6, func(pod *v1.Pod) {
pod.Namespace = "test"
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName6, 200, 200, 10, nil),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p20},
nodes: []*v1.Node{node1, node6},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
test.BuildTestPod("CPU-saver", 100, 150, nodeName6, func(pod *v1.Pod) {
pod.Namespace = "test"
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName6, 200, 200, 10, nil),
},
expectedEvictedPodCount: 1,
nodefit: true,
},
@@ -330,26 +398,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}
}
setTolerationsK1 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Tolerations = []v1.Toleration{
{
Key: "k1",
Value: "v1",
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
},
}
}
setTolerationsK2 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Tolerations = []v1.Toleration{
{
Key: "k2",
Value: "v2",
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
},
setNoScheduleTolerations := func(key, value string) func(*v1.Pod) {
return func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Tolerations = []v1.Toleration{
{
Key: key,
Value: value,
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
},
}
}
}
@@ -377,70 +436,42 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
node.ObjectMeta.Labels["node-role.kubernetes.io/worker"] = "k2"
}
setNotMasterNodeSelectorK1 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
Key: "k1",
Operator: v1.NodeSelectorOpDoesNotExist,
setNotMasterNodeSelector := func(key string) func(*v1.Pod) {
return func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
Key: key,
Operator: v1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
},
}
}
}
setNotMasterNodeSelectorK2 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
Key: "k2",
Operator: v1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
setWorkerLabelSelector := func(value string) func(*v1.Pod) {
return func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = map[string]string{}
}
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = value
}
}
setWorkerLabelSelectorK1 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = map[string]string{}
}
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = "k1"
}
setWorkerLabelSelectorK2 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = map[string]string{}
}
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = "k2"
}
testCases := []struct {
description string
pods []*v1.Pod
@@ -451,107 +482,107 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
description: "Evict pods uniformly",
pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p2", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p3", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p4", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p5", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p6", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p7", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p8", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p9", "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods uniformly with one node left out",
pods: []*v1.Pod{
// (5,3,1) -> (4,4,1) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p2", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p3", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p4", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p5", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p6", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p7", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p8", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p9", "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
},
{
description: "Evict pods uniformly with two replica sets",
pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
buildTestPodForNode("p11", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p12", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p13", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p14", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p15", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p16", "n2", setTwoRSOwnerRef),
buildTestPodForNode("p17", "n2", setTwoRSOwnerRef),
buildTestPodForNode("p18", "n2", setTwoRSOwnerRef),
buildTestPodForNode("p19", "n3", setTwoRSOwnerRef),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods uniformly with two owner references",
pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
buildTestPodForNode("p11", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p12", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p13", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p14", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p15", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p16", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p17", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p18", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p19", "n3", test.SetRSOwnerRef),
// (1,3,5) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
buildTestPodForNode("p21", "n1", setRSOwnerRef2),
buildTestPodForNode("p22", "n2", setRSOwnerRef2),
buildTestPodForNode("p23", "n2", setRSOwnerRef2),
buildTestPodForNode("p24", "n2", setRSOwnerRef2),
buildTestPodForNode("p25", "n3", setRSOwnerRef2),
buildTestPodForNode("p26", "n3", setRSOwnerRef2),
buildTestPodForNode("p27", "n3", setRSOwnerRef2),
buildTestPodForNode("p28", "n3", setRSOwnerRef2),
buildTestPodForNode("p29", "n3", setRSOwnerRef2),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods with number of pods less than nodes",
pods: []*v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p2", "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
@@ -560,125 +591,125 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
buildTestPodWithImage("p1", "n1", "foo"),
buildTestPodWithImage("p2", "n1", "bar"),
buildTestPodWithImage("p3", "n1", "baz"),
buildTestPodWithImage("p4", "n1", "foo"),
buildTestPodWithImage("p1", "foo"),
buildTestPodWithImage("p2", "bar"),
buildTestPodWithImage("p3", "baz"),
buildTestPodWithImage("p4", "foo"),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods with a single pod with three nodes",
pods: []*v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods uniformly respecting taints",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
buildTestPodForNode("p1", "worker1", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p2", "worker1", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p3", "worker1", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p4", "worker1", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p5", "worker1", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p6", "worker2", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p7", "worker2", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p8", "worker2", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p9", "worker3", setNoScheduleTolerations("k1", "v1")),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
test.BuildTestNode("master1", 2000, 3000, 10, setMasterNoScheduleTaint),
test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleTaint),
test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleTaint),
buildTestNode("worker1", nil),
buildTestNode("worker2", nil),
buildTestNode("worker3", nil),
buildTestNode("master1", setMasterNoScheduleTaint),
buildTestNode("master2", setMasterNoScheduleTaint),
buildTestNode("master3", setMasterNoScheduleTaint),
},
},
{
description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
buildTestPodForNode("p1", "worker1", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p2", "worker1", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p3", "worker1", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p4", "worker1", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p5", "worker1", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p6", "worker2", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p7", "worker2", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p8", "worker2", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p9", "worker3", setNotMasterNodeSelector("k1")),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
test.BuildTestNode("master1", 2000, 3000, 10, setMasterNoScheduleLabel),
test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleLabel),
test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleLabel),
buildTestNode("worker1", nil),
buildTestNode("worker2", nil),
buildTestNode("worker3", nil),
buildTestNode("master1", setMasterNoScheduleLabel),
buildTestNode("master2", setMasterNoScheduleLabel),
buildTestNode("master3", setMasterNoScheduleLabel),
},
},
{
description: "Evict pods uniformly respecting node selector",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
buildTestPodForNode("p1", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p2", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p3", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p4", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p5", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p6", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p7", "worker2", setWorkerLabelSelector("k1")),
buildTestPodForNode("p8", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p9", "worker3", setWorkerLabelSelector("k1")),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, setWorkerLabel),
test.BuildTestNode("worker2", 2000, 3000, 10, setWorkerLabel),
test.BuildTestNode("worker3", 2000, 3000, 10, setWorkerLabel),
test.BuildTestNode("master1", 2000, 3000, 10, nil),
test.BuildTestNode("master2", 2000, 3000, 10, nil),
test.BuildTestNode("master3", 2000, 3000, 10, nil),
buildTestNode("worker1", setWorkerLabel),
buildTestNode("worker2", setWorkerLabel),
buildTestNode("worker3", setWorkerLabel),
buildTestNode("master1", nil),
buildTestNode("master2", nil),
buildTestNode("master3", nil),
},
},
{
description: "Evict pods uniformly respecting node selector with zero target nodes",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
buildTestPodForNode("p1", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p2", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p3", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p4", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p5", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p6", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p7", "worker2", setWorkerLabelSelector("k1")),
buildTestPodForNode("p8", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p9", "worker3", setWorkerLabelSelector("k1")),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
test.BuildTestNode("master1", 2000, 3000, 10, nil),
test.BuildTestNode("master2", 2000, 3000, 10, nil),
test.BuildTestNode("master3", 2000, 3000, 10, nil),
buildTestNode("worker1", nil),
buildTestNode("worker2", nil),
buildTestNode("worker3", nil),
buildTestNode("master1", nil),
buildTestNode("master2", nil),
buildTestNode("master3", nil),
},
},
}


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -22,7 +22,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -33,73 +32,93 @@ import (
"sigs.k8s.io/descheduler/test"
)
func initPods(node *v1.Node) []*v1.Pod {
pods := make([]*v1.Pod, 0)
const (
nodeName1 = "node1"
nodeName2 = "node2"
nodeName3 = "node3"
nodeName4 = "node4"
nodeName5 = "node5"
)
for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
func buildTestNode(nodeName string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(nodeName, 2000, 3000, 10, apply)
}
// pod at index i will have 25 * i restarts.
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * i,
},
func setPodContainerStatusRestartCount(pod *v1.Pod, base int32) {
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * base,
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * i,
},
{
RestartCount: 10 * i,
},
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * base,
},
}
pods = append(pods, pod)
}
// The following 3 pods won't get evicted.
// A daemonset.
pods[6].ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
pods[7].ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pods[7].Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
{
RestartCount: 10 * base,
},
},
}
// A Mirror Pod.
pods[8].Annotations = test.GetMirrorPodAnnotation()
}
func initPodContainersWithStatusRestartCount(name string, base int32, apply func(pod *v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
// pod at index i will have 25 * i restarts, 5 for init container, 20 for other two containers
setPodContainerStatusRestartCount(pod, base)
if apply != nil {
apply(pod)
}
})
}
func initPods(apply func(pod *v1.Pod)) []*v1.Pod {
pods := make([]*v1.Pod, 0)
for i := int32(0); i <= 9; i++ {
switch i {
default:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, apply))
// The following 3 pods won't get evicted.
// A daemonset.
case 6:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
if apply != nil {
apply(pod)
}
}))
// A pod with local storage.
case 7:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
if apply != nil {
apply(pod)
}
}))
// A Mirror Pod.
case 8:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
if apply != nil {
apply(pod)
}
}))
}
}
pods = append(
pods,
test.BuildTestPod("CPU-consumer-1", 150, 100, nodeName4, test.SetNormalOwnerRef),
test.BuildTestPod("CPU-consumer-2", 150, 100, nodeName5, test.SetNormalOwnerRef),
)
return pods
}
func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
})
node3 := test.BuildTestNode("node3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)
createRemovePodsHavingTooManyRestartsAgrs := func(
podRestartThresholds int32,
includingInitContainers bool,
@@ -114,207 +133,261 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
tests := []struct {
description string
pods []*v1.Pod
nodes []*v1.Node
args RemovePodsHavingTooManyRestartsArgs
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
nodeFit bool
applyFunc func([]*v1.Pod)
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 5,
},
{
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node2},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node3},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node4},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName4, 200, 3000, 10, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node5},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
pods: initPods(func(pod *v1.Pod) {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
}
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
},
{
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods running with state=Running, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
pods: initPods(func(pod *v1.Pod) {
pod.Status.Phase = v1.PodRunning
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
pods: initPods(func(pod *v1.Pod) {
pod.Status.Phase = v1.PodPending
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
pods: initPods(func(pod *v1.Pod) {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
}
}
},
},
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
},
{
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods running with state=Running, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.Phase = v1.PodRunning
}
},
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
pods: initPods(func(pod *v1.Pod) {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.Phase = v1.PodPending
}
},
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}
},
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}
},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
pods := append(
initPods(node1),
test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, test.SetNormalOwnerRef),
test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, test.SetNormalOwnerRef),
)
if tc.applyFunc != nil {
tc.applyFunc(pods)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -322,7 +395,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range pods {
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -33,84 +33,75 @@ import (
"sigs.k8s.io/descheduler/test"
)
func TestPodAntiAffinity(t *testing.T) {
node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
node5 := test.BuildTestNode("n5", 200, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
})
const (
nodeName1 = "n1"
nodeName2 = "n2"
nodeName3 = "n3"
nodeName4 = "n4"
nodeName5 = "n5"
)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node5.Name, nil)
p9.DeletionTimestamp = &metav1.Time{}
p10.DeletionTimestamp = &metav1.Time{}
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(name, 2000, 3000, 10, apply)
}
criticalPriority := utils.SystemCriticalPriority
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) {
pod.Spec.Priority = &criticalPriority
})
p2.Labels = map[string]string{"foo": "bar"}
p5.Labels = map[string]string{"foo": "bar"}
p6.Labels = map[string]string{"foo": "bar"}
p7.Labels = map[string]string{"foo1": "bar1"}
p11.Labels = map[string]string{"foo": "bar"}
nonEvictablePod.Labels = map[string]string{"foo": "bar"}
test.SetNormalOwnerRef(p1)
test.SetNormalOwnerRef(p2)
test.SetNormalOwnerRef(p3)
test.SetNormalOwnerRef(p4)
test.SetNormalOwnerRef(p5)
test.SetNormalOwnerRef(p6)
test.SetNormalOwnerRef(p7)
test.SetNormalOwnerRef(p9)
test.SetNormalOwnerRef(p10)
test.SetNormalOwnerRef(p11)
// set pod anti affinity
test.SetPodAntiAffinity(p1, "foo", "bar")
test.SetPodAntiAffinity(p3, "foo", "bar")
test.SetPodAntiAffinity(p4, "foo", "bar")
test.SetPodAntiAffinity(p5, "foo1", "bar1")
test.SetPodAntiAffinity(p6, "foo1", "bar1")
test.SetPodAntiAffinity(p7, "foo", "bar")
test.SetPodAntiAffinity(p9, "foo", "bar")
test.SetPodAntiAffinity(p10, "foo", "bar")
// set pod priority
test.SetPodPriority(p5, 100)
test.SetPodPriority(p6, 50)
test.SetPodPriority(p7, 0)
// Set pod node selectors
p8.Spec.NodeSelector = map[string]string{
"datacenter": "west",
func setNodeMainRegionLabel(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
}
func buildTestNode1() *v1.Node {
return buildTestNode(nodeName1, setNodeMainRegionLabel)
}
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}
func buildTestPodForNode1(name string, apply func(*v1.Pod)) *v1.Pod {
return buildTestPod(name, nodeName1, apply)
}
func setPodAntiAffinityFooBar(pod *v1.Pod) {
test.SetPodAntiAffinity(pod, "foo", "bar")
}
func setPodAntiAffinityFoo1Bar1(pod *v1.Pod) {
test.SetPodAntiAffinity(pod, "foo1", "bar1")
}
func setLabelsFooBar(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar"}
}
func setLabelsFoo1Bar1(pod *v1.Pod) {
pod.Labels = map[string]string{"foo1": "bar1"}
}
func buildTestPodWithAntiAffinityForNode1(name string) *v1.Pod {
return buildTestPodForNode1(name, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
})
}
func buildTestPodP2ForNode1() *v1.Pod {
return buildTestPodForNode1("p2", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
})
}
func buildTestPodNonEvictableForNode1() *v1.Pod {
criticalPriority := utils.SystemCriticalPriority
return buildTestPodForNode1("non-evict", func(pod *v1.Pod) {
test.SetPodPriority(pod, criticalPriority)
setLabelsFooBar(pod)
})
}
func TestPodAntiAffinity(t *testing.T) {
var uint1 uint = 1
var uint3 uint = 3
@@ -125,87 +116,204 @@ func TestPodAntiAffinity(t *testing.T) {
nodes []*v1.Node
}{
{
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxNoOfPodsToEvictTotal)",
maxNoOfPodsToEvictPerNamespace: &uint3,
maxNoOfPodsToEvictTotal: &uint1,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evict only 1 pod after sorting",
pods: []*v1.Pod{p5, p6, p7},
nodes: []*v1.Node{node1},
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
description: "Evict only 1 pod after sorting",
pods: []*v1.Pod{
buildTestPodForNode1("p5", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
setPodAntiAffinityFoo1Bar1(pod)
test.SetPodPriority(pod, 100)
}),
buildTestPodForNode1("p6", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
setPodAntiAffinityFoo1Bar1(pod)
test.SetPodPriority(pod, 50)
}),
buildTestPodForNode1("p7", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFoo1Bar1(pod)
setPodAntiAffinityFooBar(pod)
test.SetPodPriority(pod, 0)
}),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node2},
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodForNode1("p8", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
buildTestNode(nodeName2, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
}),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node3},
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodForNode1("p8", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{
buildTestPodForNode1("p9", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
pod.DeletionTimestamp = &metav1.Time{}
}),
buildTestPodForNode1("p10", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
pod.DeletionTimestamp = &metav1.Time{}
}),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 0,
},
{
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1, node4},
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
test.BuildTestNode(nodeName4, 2, 2, 1, nil),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
pods: []*v1.Pod{p1, p11},
nodes: []*v1.Node{node1, node5},
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPod("p11", nodeName5, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
}),
},
nodes: []*v1.Node{
buildTestNode1(),
test.BuildTestNode(nodeName5, 200, 3000, 10, setNodeMainRegionLabel),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -32,73 +32,91 @@ import (
"sigs.k8s.io/descheduler/test"
)
const (
nodeWithLabelsName = "nodeWithLabels"
nodeWithoutLabelsName = "nodeWithoutLabels"
unschedulableNodeWithLabelsName = "unschedulableNodeWithLabels"
nodeLabelKey = "kubernetes.io/desiredNode"
nodeLabelValue = "yes"
)
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(name, 2000, 3000, 10, apply)
}
func setNodeDesiredNodeLabel(node *v1.Node) {
node.Labels[nodeLabelKey] = nodeLabelValue
}
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}
func buildUnschedulableNodeWithLabels() *v1.Node {
return buildTestNode(unschedulableNodeWithLabelsName, func(node *v1.Node) {
setNodeDesiredNodeLabel(node)
node.Spec.Unschedulable = true
})
}
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
nodeLabelKey := "kubernetes.io/desiredNode"
nodeLabelValue := "yes"
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
addPodsToNode := func(nodeName string, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
podWithNodeAffinity := buildTestPod("podWithNodeAffinity", nodeName, func(pod *v1.Pod) {
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{},
}
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{},
}
switch affinityType {
case "requiredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
switch affinityType {
case "requiredDuringSchedulingIgnoredDuringExecution":
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
}
case "preferredDuringSchedulingIgnoredDuringExecution":
pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
},
}
case "requiredDuringSchedulingRequiredDuringExecution":
default:
t.Fatalf("Invalid affinity type %s", affinityType)
}
case "preferredDuringSchedulingIgnoredDuringExecution":
podWithNodeAffinity.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
{
Weight: 10,
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeLabelKey,
Operator: "In",
Values: []string{
nodeLabelValue,
},
},
},
},
},
}
case "requiredDuringSchedulingRequiredDuringExecution":
default:
t.Fatalf("Invalid affinity type %s", affinityType)
}
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.DeletionTimestamp = deletionTimestamp
})
podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
podWithNodeAffinity.DeletionTimestamp = deletionTimestamp
pod1.DeletionTimestamp = deletionTimestamp
pod2.DeletionTimestamp = deletionTimestamp
pod1 := buildTestPod("pod1", nodeName, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.DeletionTimestamp = deletionTimestamp
})
pod2 := buildTestPod("pod2", nodeName, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.DeletionTimestamp = deletionTimestamp
})
return []*v1.Pod{
podWithNodeAffinity,
@@ -126,8 +144,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingRequiredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingRequiredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is correctly scheduled on node, no eviction expected [required affinity]",
@@ -135,8 +156,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
pods: addPodsToNode(nodeWithLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is correctly scheduled on node, no eviction expected [preferred affinity]",
@@ -144,8 +167,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels},
pods: addPodsToNode(nodeWithLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
@@ -153,8 +178,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available with better fit, should be evicted",
@@ -162,8 +190,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [required affinity]",
@@ -171,8 +202,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -181,8 +215,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -191,8 +228,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint0,
},
{
@@ -201,8 +241,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint0,
},
{
@@ -211,8 +254,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -221,8 +267,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxPodsToEvictPerNode: &uint1,
},
{
@@ -231,8 +280,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -241,8 +293,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
maxNoOfPodsToEvictTotal: &uint0,
},
@@ -252,8 +307,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -262,8 +320,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
@@ -272,8 +333,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint0,
},
{
@@ -282,8 +346,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -292,8 +359,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
@@ -302,8 +372,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildUnschedulableNodeWithLabels(),
},
nodefit: true,
},
{
@@ -312,8 +385,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithoutLabelsName, nil),
buildUnschedulableNodeWithLabels(),
},
nodefit: true,
},
{
@@ -322,8 +398,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
buildUnschedulableNodeWithLabels(),
},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},
@@ -333,8 +412,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
args: RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
},
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
nodes: []*v1.Node{
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
buildUnschedulableNodeWithLabels(),
},
maxPodsToEvictPerNode: &uint1,
nodefit: true,
},

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -22,7 +22,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -34,6 +33,37 @@ import (
"sigs.k8s.io/descheduler/test"
)
const (
nodeName1 = "n1"
nodeName2 = "n2"
nodeName3 = "n3"
nodeName4 = "n4"
nodeName5 = "n5"
nodeName6 = "n6"
nodeName7 = "n7"
datacenterLabel = "datacenter"
datacenterEast = "east"
datacenterWest = "west"
)
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(name, 2000, 3000, 10, apply)
}
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}
func buildTestPodWithNormalOwnerRef(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return buildTestPod(name, nodeName, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
if apply != nil {
apply(pod)
}
})
}
func createNoScheduleTaint(key, value string, index int) v1.Taint {
return v1.Taint{
Key: "testTaint" + fmt.Sprintf("%v", index),
@@ -50,13 +80,39 @@ func createPreferNoScheduleTaint(key, value string, index int) v1.Taint {
}
}
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
taints := []v1.Taint{}
for _, index := range indices {
taints = append(taints, createNoScheduleTaint(key, value, index))
func withTestTaint1(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
createNoScheduleTaint("testTaint", "test", 1),
}
}
func withTestingTaint1(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
createNoScheduleTaint("testingTaint", "testing", 1),
}
}
func withBothTaints1(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
createNoScheduleTaint("testTaint", "test", 1),
createNoScheduleTaint("testingTaint", "testing", 1),
}
}
func withDatacenterEastLabel(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
datacenterLabel: datacenterEast,
}
}
func withUnschedulable(node *v1.Node) {
node.Spec.Unschedulable = true
}
func withPreferNoScheduleTestTaint1(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
node.Spec.Taints = taints
return node
}
func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.TaintEffect) *v1.Pod {
@@ -69,111 +125,24 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.Tai
return pod
}
func withTestTaintToleration1(pod *v1.Pod) {
addTolerationToPod(pod, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
}
func withTestTaintXToleration1(pod *v1.Pod) {
addTolerationToPod(pod, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
}
func withLocalStorageVolume(pod *v1.Pod) {
test.SetHostPathEmptyDirVolumeSource(pod)
}
func withKubeSystemCriticalPod(pod *v1.Pod) {
pod.Namespace = "kube-system"
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
})
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node5 := test.BuildTestNode("n5", 2000, 3000, 10, nil)
node5.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
node6 := test.BuildTestNode("n6", 1, 1, 1, nil)
node6.Spec.Taints = []v1.Taint{
createPreferNoScheduleTaint("testTaint", "test", 1),
}
node7 := test.BuildTestNode("n7", 2000, 3000, 10, nil)
node7 = addTaintsToNode(node7, "testTaint", "test", []int{1})
node7 = addTaintsToNode(node7, "testingTaint", "testing", []int{1})
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
p12 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p8.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p10.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p12.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// The following 4 pods won't get evicted.
// A Critical Pod.
p7.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
p7.Spec.Priority = &priority
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// A daemonset.
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p9.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
p10.Annotations = test.GetMirrorPodAnnotation()
p1 = addTolerationToPod(p1, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p3 = addTolerationToPod(p3, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
p12.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
p13 := test.BuildTestPod("p13", 100, 0, node5.Name, nil)
p13.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
// node5 has PreferNoSchedule:testTaint1=test1, so p13 has to have
// PreferNoSchedule:testTaint0=test0 so the taint is not tolerated
p13 = addTolerationToPod(p13, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
p14 := test.BuildTestPod("p14", 100, 0, node7.Name, nil)
p14.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p14 = addTolerationToPod(p14, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p15 := test.BuildTestPod("p15", 100, 0, node7.Name, nil)
p15.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
p15 = addTolerationToPod(p15, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
p15 = addTolerationToPod(p15, "testingTaint", "testing", 1, v1.TaintEffectNoSchedule)
var uint1, uint2 uint = 1, 2
tests := []struct {
@@ -192,203 +161,306 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
includedTaints []string
}{
{
description: "Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
expectedEvictedPodCount: 1, // p2 gets evicted
},
{
description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p3, p4},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p4", nodeName1, withTestTaintXToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
expectedEvictedPodCount: 1, // p4 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictTotal> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Only <maxNoOfPodsToEvictTotal> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
maxPodsToEvictPerNode: &uint2,
maxNoOfPodsToEvictTotal: &uint1,
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
},
{
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
maxPodsToEvictPerNode: &uint1,
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{p7, p8, p9, p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Critical pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
buildTestPod("p8", nodeName2, test.SetDSOwnerRef),
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
},
nodes: []*v1.Node{
buildTestNode(nodeName2, withTestingTaint1),
},
expectedEvictedPodCount: 0, // nothing is evicted
},
{
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{p7, p8, p9, p10},
nodes: []*v1.Node{node2},
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
buildTestPod("p8", nodeName2, test.SetDSOwnerRef),
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
},
nodes: []*v1.Node{
buildTestNode(nodeName2, withTestingTaint1),
},
evictLocalStoragePods: true,
evictSystemCriticalPods: false,
expectedEvictedPodCount: 1, // p9 gets evicted
},
{
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p7, p8, p10, p11},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
buildTestPod("p8", nodeName2, test.SetDSOwnerRef),
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
buildTestPodWithNormalOwnerRef("p11", nodeName2, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName2, withTestingTaint1),
},
expectedEvictedPodCount: 1, // p11 gets evicted
},
{
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false,
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
buildTestNode(nodeName2, withTestingTaint1),
},
evictSystemCriticalPods: true,
expectedEvictedPodCount: 2, // p2 and p7 are evicted
},
{
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
buildTestNode(nodeName2, withTestingTaint1),
},
expectedEvictedPodCount: 0, // p2 is not evicted; with nodeFit enabled it does not tolerate the taint on the other node either
nodeFit: true,
},
{
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
pods: []*v1.Pod{p1, p3, p12},
nodes: []*v1.Node{node1, node3},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p11", nodeName2, func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
datacenterLabel: datacenterWest,
}
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
buildTestNode(nodeName3, withDatacenterEastLabel),
},
expectedEvictedPodCount: 0, // nothing gets evicted; the other node does not match the pod's node selector
nodeFit: true,
},
{
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node4},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
buildTestNode(nodeName4, withUnschedulable),
},
expectedEvictedPodCount: 0, // p2 is not evicted; the only other node is unschedulable
nodeFit: true,
},
{
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node5},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p13", nodeName5, func(pod *v1.Pod) {
addTolerationToPod(pod, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName5, withPreferNoScheduleTestTaint1),
},
expectedEvictedPodCount: 0,
},
{
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node5},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p13", nodeName5, func(pod *v1.Pod) {
addTolerationToPod(pod, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName5, withPreferNoScheduleTestTaint1),
},
includePreferNoSchedule: true,
expectedEvictedPodCount: 1, // p13 gets evicted
},
{
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
excludedTaints: []string{"excludedTaint1", "testTaint1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as one of the specified excludedTaints matches the key of node1's taint
},
{
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
excludedTaints: []string{"testTaint1=test1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as both the key and value of the excluded taint match node1's taint
},
{
description: "The excluded taint matches the key of node1's taint, but does not match the value",
pods: []*v1.Pod{p2},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "The excluded taint matches the key of node1's taint, but does not match the value",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
excludedTaints: []string{"testTaint1=test2"},
expectedEvictedPodCount: 1, // pod gets evicted, as excluded taint value does not match node1's taint value
},
{
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node1, node6},
evictLocalStoragePods: false,
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
test.BuildTestNode(nodeName6, 1, 1, 1, withPreferNoScheduleTestTaint1),
},
evictSystemCriticalPods: true,
expectedEvictedPodCount: 0, // p2 and p7 can't be evicted
nodeFit: true,
},
{
description: "Pods tolerating included taints should not get evicted even with other taints present",
pods: []*v1.Pod{p1},
nodes: []*v1.Node{node7},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods tolerating included taints should not get evicted even with other taints present",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName7, withBothTaints1),
},
includedTaints: []string{"testTaint1=test1"},
expectedEvictedPodCount: 0, // nothing gets evicted, as p1 tolerates the included taint, and taint "testingTaint1=testing1" is not included
},
{
description: "Pods not tolerating not included taints should not get evicted",
pods: []*v1.Pod{p1, p2, p4},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating not included taints should not get evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p4", nodeName1, withTestTaintXToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
includedTaints: []string{"testTaint2=test2"},
expectedEvictedPodCount: 0, // nothing gets evicted, as the taint is not included, even though pods p2 and p4 do not tolerate node1's taint
},
{
description: "Pods tolerating includedTaint should not get evicted. Pods not tolerating includedTaints should get evicted",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods tolerating includedTaint should not get evicted. Pods not tolerating includedTaints should get evicted",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, withTestTaint1),
},
includedTaints: []string{"testTaint1=test1"},
expectedEvictedPodCount: 1, // node1 taint is included. p1 and p3 tolerate the included taint, p2 gets evicted
},
{
description: "Pods not tolerating all taints are evicted when includedTaints is empty",
pods: []*v1.Pod{p14, p15},
nodes: []*v1.Node{node7},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
description: "Pods not tolerating all taints are evicted when includedTaints is empty",
pods: []*v1.Pod{
buildTestPodWithNormalOwnerRef("p14", nodeName7, withTestTaintToleration1),
buildTestPodWithNormalOwnerRef("p15", nodeName7, func(pod *v1.Pod) {
withTestTaintToleration1(pod)
addTolerationToPod(pod, "testingTaint", "testing", 1, v1.TaintEffectNoSchedule)
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName7, withBothTaints1),
},
expectedEvictedPodCount: 1, // includedTaints is empty so all taints are included. p15 tolerates both node taints and does not get evicted. p14 tolerates only one and gets evicted
},
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -9,12 +9,10 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
@@ -1490,18 +1488,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
}
var evictedPods []string
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
evictedPods = append(evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
})
test.RegisterEvictedPodsCollector(fakeClient, &evictedPods)
SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(&tc.args)
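
A minimal sketch of what the shared test.RegisterEvictedPodsCollector helper could look like, reconstructed from the inline reactor removed above; the real helper in the test package may differ.

package test

import (
	"fmt"

	policy "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// RegisterEvictedPodsCollector records the name of every pod evicted through the
// fake clientset into *evictedPods, then falls through to the default reactor so
// the eviction itself is still processed.
func RegisterEvictedPodsCollector(fakeClient *fake.Clientset, evictedPods *[]string) {
	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() == "eviction" {
			createAct, matched := action.(core.CreateActionImpl)
			if !matched {
				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
			}
			if eviction, matched := createAct.Object.(*policy.Eviction); matched {
				*evictedPods = append(*evictedPods, eviction.GetName())
			}
		}
		return false, nil, nil // fall back to the default reactor
	})
}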

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -78,6 +78,14 @@ type handleImpl struct {
var _ frameworktypes.Handle = &handleImpl{}
// pluginHandle wraps a shared handleImpl and adds a plugin-specific instance ID
type pluginHandle struct {
*handleImpl
pluginInstanceID string
}
var _ frameworktypes.Handle = &pluginHandle{}
// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
return hi.clientSet
@@ -106,6 +114,17 @@ func (hi *handleImpl) Evictor() frameworktypes.Evictor {
return hi.evictor
}
// PluginInstanceID panics when called on the base handle; it is not meant to be used directly.
// Plugins receive a pluginHandle, which carries their specific instance ID.
func (hi *handleImpl) PluginInstanceID() string {
panic(fmt.Errorf("Not implemented"))
}
// PluginInstanceID returns a unique identifier for this plugin instance.
func (ph *pluginHandle) PluginInstanceID() string {
return ph.pluginInstanceID
}
type filterPlugin interface {
frameworktypes.Plugin
Filter(pod *v1.Pod) bool
@@ -142,6 +161,7 @@ type handleImplOpts struct {
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
podEvictor *evictions.PodEvictor
metricsCollector *metricscollector.MetricsCollector
profileInstanceID string
}
// WithClientSet sets clientSet for the scheduling frameworkImpl.
@@ -182,6 +202,14 @@ func WithMetricsCollector(metricsCollector *metricscollector.MetricsCollector) O
}
}
// WithProfileInstanceID sets the profile instance ID for the handle.
// This will be used to construct unique plugin instance IDs.
func WithProfileInstanceID(profileInstanceID string) Option {
return func(o *handleImplOpts) {
o.profileInstanceID = profileInstanceID
}
}
func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.PluginConfig, int) {
for idx, pluginConfig := range pluginConfigs {
if pluginConfig.Name == pluginName {
@@ -191,7 +219,7 @@ func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.
return nil, 0
}
func buildPlugin(ctx context.Context, config api.DeschedulerProfile, pluginName string, handle *handleImpl, reg pluginregistry.Registry) (frameworktypes.Plugin, error) {
func buildPlugin(ctx context.Context, config api.DeschedulerProfile, pluginName string, handle frameworktypes.Handle, reg pluginregistry.Registry) (frameworktypes.Plugin, error) {
pc, _ := getPluginConfig(pluginName, config.PluginConfigs)
if pc == nil {
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", pluginName, "profile", config.Name)
@@ -272,6 +300,7 @@ func NewProfile(ctx context.Context, config api.DeschedulerProfile, reg pluginre
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
}
// Create a base handle that will be used as a template for plugin-specific handles
handle := &handleImpl{
clientSet: hOpts.clientSet,
getPodsAssignedToNodeFunc: hOpts.getPodsAssignedToNodeFunc,
@@ -284,20 +313,26 @@ func NewProfile(ctx context.Context, config api.DeschedulerProfile, reg pluginre
prometheusClient: hOpts.prometheusClient,
}
// Collect all unique plugin names across all extension points
pluginNames := append(config.Plugins.Deschedule.Enabled, config.Plugins.Balance.Enabled...)
pluginNames = append(pluginNames, config.Plugins.Filter.Enabled...)
pluginNames = append(pluginNames, config.Plugins.PreEvictionFilter.Enabled...)
// Build each unique plugin only once with a unique plugin instance ID
plugins := make(map[string]frameworktypes.Plugin)
for _, plugin := range sets.New(pluginNames...).UnsortedList() {
pg, err := buildPlugin(ctx, config, plugin, handle, reg)
for idx, pluginName := range sets.New(pluginNames...).UnsortedList() {
ph := &pluginHandle{
handleImpl: handle,
pluginInstanceID: fmt.Sprintf("%s-%d", hOpts.profileInstanceID, idx),
}
pg, err := buildPlugin(ctx, config, pluginName, ph, reg)
if err != nil {
return nil, fmt.Errorf("unable to build %v plugin: %v", plugin, err)
return nil, fmt.Errorf("unable to build %v plugin: %v", pluginName, err)
}
if pg == nil {
return nil, fmt.Errorf("got empty %v plugin build", plugin)
return nil, fmt.Errorf("got empty %v plugin build", pluginName)
}
plugins[plugin] = pg
plugins[pluginName] = pg
}
// Later, when a default list of plugins and their extension points is established,
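
Each plugin is now built with its own pluginHandle, so a plugin can read an ID of the form "<profileInstanceID>-<index>" from the handle it receives. A hypothetical plugin constructor, not taken from the repository, sketching that consumption; it assumes the frameworktypes import path and a Name-only Plugin interface, and uses the registration signature seen in the tests below.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"

	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// examplePlugin is a hypothetical plugin that remembers the instance ID it was built with.
type examplePlugin struct {
	handle     frameworktypes.Handle
	instanceID string
}

var _ frameworktypes.Plugin = &examplePlugin{}

func (p *examplePlugin) Name() string { return "ExamplePlugin" }

// New follows the plugin registration signature; the handle passed in by NewProfile
// is a pluginHandle, so PluginInstanceID() returns a value such as "0-1".
func New(_ context.Context, _ runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	instanceID := handle.PluginInstanceID()
	klog.V(4).InfoS("building plugin", "plugin", "ExamplePlugin", "instanceID", instanceID)
	return &examplePlugin{handle: handle, instanceID: instanceID}, nil
}

Profiles supply the profile part of the ID through the new WithProfileInstanceID option, as the TestPluginInstanceIDs test below does.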

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"sort"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
@@ -26,7 +27,60 @@ import (
testutils "sigs.k8s.io/descheduler/test"
)
// registerDefaultEvictor registers the DefaultEvictor plugin with the given registry
func registerDefaultEvictor(registry pluginregistry.Registry) {
pluginregistry.Register(
defaultevictor.PluginName,
defaultevictor.New,
&defaultevictor.DefaultEvictor{},
&defaultevictor.DefaultEvictorArgs{},
defaultevictor.ValidateDefaultEvictorArgs,
defaultevictor.SetDefaults_DefaultEvictorArgs,
registry,
)
}
func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
// Helper to build profile config with default Filter and PreEvictionFilter
buildProfileConfig := func(name string, descheduleEnabled, balanceEnabled bool) api.DeschedulerProfile {
config := api.DeschedulerProfile{
Name: name,
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: "FakePlugin",
Args: &fakeplugin.FakePluginArgs{},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
}
if descheduleEnabled {
config.Plugins.Deschedule = api.PluginSet{
Enabled: []string{"FakePlugin"},
}
}
if balanceEnabled {
config.Plugins.Balance = api.PluginSet{
Enabled: []string{"FakePlugin"},
}
}
return config
}
tests := []struct {
name string
config api.DeschedulerProfile
@@ -34,134 +88,26 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
expectedEviction bool
}{
{
name: "profile with deschedule extension point enabled single eviction",
config: api.DeschedulerProfile{
Name: "strategy-test-profile-with-deschedule",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: "FakePlugin",
Args: &fakeplugin.FakePluginArgs{},
},
},
Plugins: api.Plugins{
Deschedule: api.PluginSet{
Enabled: []string{"FakePlugin"},
},
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
name: "profile with deschedule extension point enabled single eviction",
config: buildProfileConfig("strategy-test-profile-with-deschedule", true, false),
extensionPoint: frameworktypes.DescheduleExtensionPoint,
expectedEviction: true,
},
{
name: "profile with balance extension point enabled single eviction",
config: api.DeschedulerProfile{
Name: "strategy-test-profile-with-balance",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: "FakePlugin",
Args: &fakeplugin.FakePluginArgs{},
},
},
Plugins: api.Plugins{
Balance: api.PluginSet{
Enabled: []string{"FakePlugin"},
},
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
name: "profile with balance extension point enabled single eviction",
config: buildProfileConfig("strategy-test-profile-with-balance", false, true),
extensionPoint: frameworktypes.BalanceExtensionPoint,
expectedEviction: true,
},
{
name: "profile with deschedule extension point balance enabled no eviction",
config: api.DeschedulerProfile{
Name: "strategy-test-profile-with-deschedule",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: "FakePlugin",
Args: &fakeplugin.FakePluginArgs{},
},
},
Plugins: api.Plugins{
Balance: api.PluginSet{
Enabled: []string{"FakePlugin"},
},
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
name: "profile with deschedule extension point balance enabled no eviction",
config: buildProfileConfig("strategy-test-profile-with-balance", false, true),
extensionPoint: frameworktypes.DescheduleExtensionPoint,
expectedEviction: false,
},
{
name: "profile with balance extension point deschedule enabled no eviction",
config: api.DeschedulerProfile{
Name: "strategy-test-profile-with-deschedule",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: nil,
},
},
},
{
Name: "FakePlugin",
Args: &fakeplugin.FakePluginArgs{},
},
},
Plugins: api.Plugins{
Deschedule: api.PluginSet{
Enabled: []string{"FakePlugin"},
},
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
name: "profile with balance extension point deschedule enabled no eviction",
config: buildProfileConfig("strategy-test-profile-with-deschedule", true, false),
extensionPoint: frameworktypes.BalanceExtensionPoint,
expectedEviction: false,
},
@@ -206,25 +152,9 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
}
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(
"FakePlugin",
fakeplugin.NewPluginFncFromFake(&fakePlugin),
&fakeplugin.FakePlugin{},
&fakeplugin.FakePluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
fakeplugin.RegisterFakePlugin("FakePlugin", &fakePlugin, pluginregistry.PluginRegistry)
pluginregistry.Register(
defaultevictor.PluginName,
defaultevictor.New,
&defaultevictor.DefaultEvictor{},
&defaultevictor.DefaultEvictorArgs{},
defaultevictor.ValidateDefaultEvictorArgs,
defaultevictor.SetDefaults_DefaultEvictorArgs,
pluginregistry.PluginRegistry,
)
registerDefaultEvictor(pluginregistry.PluginRegistry)
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
var evictedPods []string
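
The repeated pluginregistry.Register blocks above are collapsed into fakeplugin.Register* helpers. A minimal sketch of what RegisterFakePlugin could look like inside the fakeplugin package, reconstructed from the removed call and assuming the pluginregistry import path; the sibling RegisterFakeDeschedulePlugin, RegisterFakeBalancePlugin, and RegisterFakeFilterPlugin helpers would follow the same pattern.

package fakeplugin

import (
	"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
)

// RegisterFakePlugin wraps the boilerplate pluginregistry.Register call for a
// FakePlugin instance; a reconstruction, the real helper may differ.
func RegisterFakePlugin(name string, plugin *FakePlugin, registry pluginregistry.Registry) {
	pluginregistry.Register(
		name,
		NewPluginFncFromFake(plugin),
		&FakePlugin{},
		&FakePluginArgs{},
		ValidateFakePluginArgs,
		SetDefaults_FakePluginArgs,
		registry,
	)
}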
@@ -319,56 +249,13 @@ func TestProfileExtensionPoints(t *testing.T) {
fakeBalancePlugin := &fakeplugin.FakeBalancePlugin{PluginName: balancePluginName}
fakeFilterPlugin := &fakeplugin.FakeFilterPlugin{PluginName: filterPluginName}
pluginregistry.Register(
fakePluginName,
fakeplugin.NewPluginFncFromFake(fakePlugin),
&fakeplugin.FakePlugin{},
&fakeplugin.FakePluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
pluginregistry.Register(
deschedulePluginName,
fakeplugin.NewFakeDeschedulePluginFncFromFake(fakeDeschedulePlugin),
&fakeplugin.FakeDeschedulePlugin{},
&fakeplugin.FakeDeschedulePluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
pluginregistry.Register(
balancePluginName,
fakeplugin.NewFakeBalancePluginFncFromFake(fakeBalancePlugin),
&fakeplugin.FakeBalancePlugin{},
&fakeplugin.FakeBalancePluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
pluginregistry.Register(
filterPluginName,
fakeplugin.NewFakeFilterPluginFncFromFake(fakeFilterPlugin),
&fakeplugin.FakeFilterPlugin{},
&fakeplugin.FakeFilterPluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
fakeplugin.RegisterFakePlugin(fakePluginName, fakePlugin, pluginregistry.PluginRegistry)
fakeplugin.RegisterFakeDeschedulePlugin(deschedulePluginName, fakeDeschedulePlugin, pluginregistry.PluginRegistry)
fakeplugin.RegisterFakeBalancePlugin(balancePluginName, fakeBalancePlugin, pluginregistry.PluginRegistry)
fakeplugin.RegisterFakeFilterPlugin(filterPluginName, fakeFilterPlugin, pluginregistry.PluginRegistry)
}
pluginregistry.Register(
defaultevictor.PluginName,
defaultevictor.New,
&defaultevictor.DefaultEvictor{},
&defaultevictor.DefaultEvictorArgs{},
defaultevictor.ValidateDefaultEvictorArgs,
defaultevictor.SetDefaults_DefaultEvictorArgs,
pluginregistry.PluginRegistry,
)
registerDefaultEvictor(pluginregistry.PluginRegistry)
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
var evictedPods []string
@@ -524,15 +411,7 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
})
// plugin implementing Filter extension point
pluginregistry.Register(
pluginName,
fakeplugin.NewFakeFilterPluginFncFromFake(fakeFilterPlugin),
&fakeplugin.FakeFilterPlugin{},
&fakeplugin.FakeFilterPluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
fakeplugin.RegisterFakeFilterPlugin(pluginName, fakeFilterPlugin, pluginregistry.PluginRegistry)
fakePluginName := fmt.Sprintf("FakePlugin_%v", i)
fakePlugin := fakeplugin.FakePlugin{}
@@ -557,26 +436,10 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
return true, false, nil
})
pluginregistry.Register(
fakePluginName,
fakeplugin.NewPluginFncFromFake(&fakePlugin),
&fakeplugin.FakePlugin{},
&fakeplugin.FakePluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
pluginregistry.PluginRegistry,
)
fakeplugin.RegisterFakePlugin(fakePluginName, &fakePlugin, pluginregistry.PluginRegistry)
}
pluginregistry.Register(
defaultevictor.PluginName,
defaultevictor.New,
&defaultevictor.DefaultEvictor{},
&defaultevictor.DefaultEvictorArgs{},
defaultevictor.ValidateDefaultEvictorArgs,
defaultevictor.SetDefaults_DefaultEvictorArgs,
pluginregistry.PluginRegistry,
)
registerDefaultEvictor(pluginregistry.PluginRegistry)
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
var evictedPods []string
@@ -680,3 +543,325 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
t.Errorf("check for balance invocation order failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
}
}
// verifyInstanceIDsMatch verifies that instance IDs captured at creation, deschedule, and balance match
func verifyInstanceIDsMatch(t *testing.T, profileInstanceID string, pluginNames []string, creationIDs, descheduleIDs, balanceIDs map[string]string) {
for _, pluginName := range pluginNames {
creationID := creationIDs[pluginName]
descheduleID := descheduleIDs[pluginName]
balanceID := balanceIDs[pluginName]
if creationID == "" {
t.Errorf("Profile %s, plugin %s: plugin creation did not capture instance ID", profileInstanceID, pluginName)
}
if descheduleID == "" {
t.Errorf("Profile %s, plugin %s: deschedule extension point did not capture instance ID", profileInstanceID, pluginName)
}
if balanceID == "" {
t.Errorf("Profile %s, plugin %s: balance extension point did not capture instance ID", profileInstanceID, pluginName)
}
// Verify all IDs match
if creationID != descheduleID {
t.Errorf("Profile %s, plugin %s: instance ID mismatch - creation: %s, deschedule: %s", profileInstanceID, pluginName, creationID, descheduleID)
}
if creationID != balanceID {
t.Errorf("Profile %s, plugin %s: instance ID mismatch - creation: %s, balance: %s", profileInstanceID, pluginName, creationID, balanceID)
}
if descheduleID != balanceID {
t.Errorf("Profile %s, plugin %s: instance ID mismatch - deschedule: %s, balance: %s", profileInstanceID, pluginName, descheduleID, balanceID)
}
}
}
// verifyInstanceIDFormat verifies that instance IDs have correct format and sequential indices
func verifyInstanceIDFormat(t *testing.T, profileInstanceID string, pluginNames []string, pluginIDs map[string]string) sets.Set[string] {
if len(pluginIDs) != len(pluginNames) {
t.Errorf("Profile %s: expected %d plugins to be invoked, got %d", profileInstanceID, len(pluginNames), len(pluginIDs))
}
// Collect all instance IDs for this profile
profileInstanceIDs := sets.New[string]()
for pluginName, instanceID := range pluginIDs {
if instanceID == "" {
t.Errorf("Profile %s, plugin %s: expected instance ID to be set, got empty string", profileInstanceID, pluginName)
}
profileInstanceIDs.Insert(instanceID)
}
// Verify all IDs within this profile are unique
if profileInstanceIDs.Len() != len(pluginIDs) {
t.Errorf("Profile %s: duplicate instance IDs found", profileInstanceID)
}
// Verify all IDs match the expected format: "{profileInstanceID}-{index}"
// and contain sequential indices from 0 to n-1
expectedIndices := sets.New[int]()
for i := 0; i < len(pluginNames); i++ {
expectedIndices.Insert(i)
}
actualIndices := sets.New[int]()
for pluginName, instanceID := range pluginIDs {
var idx int
expectedPrefix := profileInstanceID + "-"
if !strings.HasPrefix(instanceID, expectedPrefix) {
t.Errorf("Profile %s, plugin %s: instance ID %s does not start with %s", profileInstanceID, pluginName, instanceID, expectedPrefix)
continue
}
_, err := fmt.Sscanf(instanceID, profileInstanceID+"-%d", &idx)
if err != nil {
t.Errorf("Profile %s, plugin %s: instance ID %s does not match expected format", profileInstanceID, pluginName, instanceID)
continue
}
actualIndices.Insert(idx)
}
// Verify we have indices 0 through n-1
diff := cmp.Diff(expectedIndices, actualIndices)
if diff != "" {
t.Errorf("Profile %s: instance ID indices mismatch (-want +got):\n%s", profileInstanceID, diff)
}
return profileInstanceIDs
}
func TestPluginInstanceIDs(t *testing.T) {
tests := []struct {
name string
profiles []struct {
profileInstanceID string
pluginNames []string
}
}{
{
name: "single plugin gets instance ID",
profiles: []struct {
profileInstanceID string
pluginNames []string
}{
{
profileInstanceID: "0",
pluginNames: []string{"TestPlugin"},
},
},
},
{
name: "two plugins get different instance IDs",
profiles: []struct {
profileInstanceID string
pluginNames []string
}{
{
profileInstanceID: "0",
pluginNames: []string{"Plugin_0", "Plugin_1"},
},
},
},
{
name: "three profiles with two plugins each get unique instance IDs",
profiles: []struct {
profileInstanceID string
pluginNames []string
}{
{
profileInstanceID: "0",
pluginNames: []string{"Plugin_A", "Plugin_B"},
},
{
profileInstanceID: "1",
pluginNames: []string{"Plugin_C", "Plugin_D"},
},
{
profileInstanceID: "2",
pluginNames: []string{"Plugin_E", "Plugin_F"},
},
},
},
{
name: "three profiles with same plugin names get different instance IDs per profile",
profiles: []struct {
profileInstanceID string
pluginNames []string
}{
{
profileInstanceID: "0",
pluginNames: []string{"CommonPlugin_X", "CommonPlugin_Y"},
},
{
profileInstanceID: "1",
pluginNames: []string{"CommonPlugin_X", "CommonPlugin_Y"},
},
{
profileInstanceID: "2",
pluginNames: []string{"CommonPlugin_X", "CommonPlugin_Y"},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
n1 := testutils.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := testutils.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{n1, n2}
// Track instance IDs by profile from different stages
profileDescheduleIDs := make(map[string]map[string]string) // profileInstanceID -> pluginName -> instanceID (from Deschedule execution)
profileBalanceIDs := make(map[string]map[string]string) // profileInstanceID -> pluginName -> instanceID (from Balance execution)
profileCreationIDs := make(map[string]map[string]string) // profileInstanceID -> pluginName -> instanceID (from plugin creation)
registry := pluginregistry.NewRegistry()
// Collect all distinct plugin names across all profiles
allPluginNames := sets.New[string]()
for _, profileCfg := range test.profiles {
allPluginNames.Insert(profileCfg.pluginNames...)
}
// Helper function to validate and store instance ID
captureInstanceID := func(instanceID, pluginName string, targetMap map[string]map[string]string) {
parts := strings.Split(instanceID, "-")
if len(parts) < 2 {
t.Fatalf("Plugin %s: instance ID %s does not have expected format 'profileID-index'", pluginName, instanceID)
}
profileID := parts[0]
if targetMap[profileID] == nil {
targetMap[profileID] = make(map[string]string)
}
targetMap[profileID][pluginName] = instanceID
}
// Register all plugins before creating profiles
for _, pluginName := range allPluginNames.UnsortedList() {
// Capture plugin name for closure
name := pluginName
pluginregistry.Register(
pluginName,
func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePlugin := &fakeplugin.FakePlugin{PluginName: name}
fakePlugin.AddReactor(string(frameworktypes.DescheduleExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
if dAction, ok := action.(fakeplugin.DescheduleAction); ok {
captureInstanceID(dAction.Handle().PluginInstanceID(), name, profileDescheduleIDs)
return true, false, nil
}
return false, false, nil
})
fakePlugin.AddReactor(string(frameworktypes.BalanceExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
if bAction, ok := action.(fakeplugin.BalanceAction); ok {
captureInstanceID(bAction.Handle().PluginInstanceID(), name, profileBalanceIDs)
return true, false, nil
}
return false, false, nil
})
// Use NewPluginFncFromFakeWithReactor to wrap and capture instance ID at creation
builder := fakeplugin.NewPluginFncFromFakeWithReactor(fakePlugin, func(action fakeplugin.ActionImpl) {
captureInstanceID(action.Handle().PluginInstanceID(), name, profileCreationIDs)
})
return builder(ctx, args, handle)
},
&fakeplugin.FakePlugin{},
&fakeplugin.FakePluginArgs{},
fakeplugin.ValidateFakePluginArgs,
fakeplugin.SetDefaults_FakePluginArgs,
registry,
)
}
client := fakeclientset.NewSimpleClientset(n1, n2)
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
ctx,
client,
nil,
defaultevictor.DefaultEvictorArgs{},
nil,
)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
// Create all profiles
var profiles []*profileImpl
for _, profileCfg := range test.profiles {
var pluginConfigs []api.PluginConfig
for _, pluginName := range profileCfg.pluginNames {
pluginConfigs = append(pluginConfigs, api.PluginConfig{
Name: pluginName,
Args: &fakeplugin.FakePluginArgs{},
})
}
prfl, err := NewProfile(
ctx,
api.DeschedulerProfile{
Name: "test-profile",
PluginConfigs: pluginConfigs,
Plugins: api.Plugins{
Deschedule: api.PluginSet{
Enabled: profileCfg.pluginNames,
},
Balance: api.PluginSet{
Enabled: profileCfg.pluginNames,
},
},
},
registry,
WithClientSet(client),
WithSharedInformerFactory(handle.SharedInformerFactoryImpl),
WithPodEvictor(podEvictor),
WithGetPodsAssignedToNodeFnc(handle.GetPodsAssignedToNodeFuncImpl),
WithProfileInstanceID(profileCfg.profileInstanceID),
)
if err != nil {
t.Fatalf("unable to create profile: %v", err)
}
profiles = append(profiles, prfl)
}
// Run deschedule and balance plugins for all profiles
for _, prfl := range profiles {
prfl.RunDeschedulePlugins(ctx, nodes)
prfl.RunBalancePlugins(ctx, nodes)
}
// Verify creation, deschedule, and balance IDs all match
for _, profileCfg := range test.profiles {
verifyInstanceIDsMatch(
t,
profileCfg.profileInstanceID,
profileCfg.pluginNames,
profileCreationIDs[profileCfg.profileInstanceID],
profileDescheduleIDs[profileCfg.profileInstanceID],
profileBalanceIDs[profileCfg.profileInstanceID],
)
}
// Verify all plugins were invoked and have correct instance IDs
allInstanceIDs := sets.New[string]()
for _, profileCfg := range test.profiles {
profileInstanceIDs := verifyInstanceIDFormat(
t,
profileCfg.profileInstanceID,
profileCfg.pluginNames,
profileDescheduleIDs[profileCfg.profileInstanceID],
)
allInstanceIDs = allInstanceIDs.Union(profileInstanceIDs)
}
// Verify all instance IDs are unique across all profiles
totalExpectedPlugins := 0
for _, profileCfg := range test.profiles {
totalExpectedPlugins += len(profileCfg.pluginNames)
}
if allInstanceIDs.Len() != totalExpectedPlugins {
t.Errorf("Expected %d unique instance IDs across all profiles, got %d", totalExpectedPlugins, allInstanceIDs.Len())
}
})
}
}
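The format asserted above ("profileID-index") implies each plugin's instance ID combines the profile's instance ID with the plugin's position within that profile. A minimal sketch of that composition, for illustration only (the function name and wiring are invented; the real IDs are produced inside the profile builder):

package profile

import "fmt"

// buildPluginInstanceID is an illustrative sketch only: it produces IDs of the
// "profileID-index" shape the test asserts, from a profile instance ID and the
// plugin's index within that profile, e.g. buildPluginInstanceID("1", 0) == "1-0".
func buildPluginInstanceID(profileInstanceID string, pluginIndex int) string {
	return fmt.Sprintf("%s-%d", profileInstanceID, pluginIndex)
}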

View File

@@ -41,6 +41,9 @@ type Handle interface {
GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc
SharedInformerFactory() informers.SharedInformerFactory
MetricsCollector() *metricscollector.MetricsCollector
// PluginInstanceID returns a unique identifier for this plugin instance.
// The ID is unique across all plugin instances in a configuration.
PluginInstanceID() string
}
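For illustration, a minimal sketch of a plugin constructor consuming this handle method. The plugin, its package, and the log line are invented; it assumes frameworktypes resolves to sigs.k8s.io/descheduler/pkg/framework/types and that Plugin only requires Name():

package myplugin

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"

	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// myPlugin is a hypothetical plugin that keeps the handle it was built with.
type myPlugin struct {
	handle frameworktypes.Handle
}

func (p *myPlugin) Name() string { return "MyPlugin" }

// New logs the per-profile instance ID at build time, e.g. to disambiguate log lines
// or metrics when the same plugin is enabled in several profiles.
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	klog.InfoS("building plugin", "plugin", "MyPlugin", "instanceID", handle.PluginInstanceID())
	return &myPlugin{handle: handle}, nil
}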
// Evictor defines an interface for filtering and evicting pods

View File

@@ -110,6 +110,12 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
opts = append(opts, otlptracegrpc.WithInsecure())
}
if os.Getenv("USER") == "" {
if err := os.Setenv("USER", "descheduler"); err != nil {
klog.ErrorS(err, "failed to set USER environment variable")
}
}
client := otlptracegrpc.NewClient(opts...)
exporter, err := otlptrace.New(ctx, client)

View File

@@ -11,24 +11,10 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
resourcehelper "k8s.io/component-helpers/resource"
"k8s.io/klog/v2"
)
// GetResourceRequest finds and returns the request value for a specific resource.
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
if resource == v1.ResourcePods {
return 1
}
requestQuantity := GetResourceRequestQuantity(pod, resource)
if resource == v1.ResourceCPU {
return requestQuantity.MilliValue()
}
return requestQuantity.Value()
}
// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
requestQuantity := resource.Quantity{}
@@ -42,26 +28,8 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
}
for _, container := range pod.Spec.Containers {
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
requestQuantity.Add(rQuantity)
}
}
for _, container := range pod.Spec.InitContainers {
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
if requestQuantity.Cmp(rQuantity) < 0 {
requestQuantity = rQuantity.DeepCopy()
}
}
}
// We assume pod overhead feature gate is enabled.
// We can't import the scheduler settings so we will inherit the default.
if pod.Spec.Overhead != nil {
if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
requestQuantity.Add(podOverhead)
}
if rQuantity, ok := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})[resourceName]; ok {
requestQuantity.Add(rQuantity)
}
return requestQuantity
@@ -171,59 +139,9 @@ func GetPodSource(pod *v1.Pod) (string, error) {
// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
// total container resource requests and to the total container limits which have a
// non-zero quantity.
func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
reqs, limits = v1.ResourceList{}, v1.ResourceList{}
for _, container := range pod.Spec.Containers {
addResourceList(reqs, container.Resources.Requests)
addResourceList(limits, container.Resources.Limits)
}
// init containers define the minimum of any resource
for _, container := range pod.Spec.InitContainers {
maxResourceList(reqs, container.Resources.Requests)
maxResourceList(limits, container.Resources.Limits)
}
// We assume pod overhead feature gate is enabled.
// We can't import the scheduler settings so we will inherit the default.
if pod.Spec.Overhead != nil {
addResourceList(reqs, pod.Spec.Overhead)
for name, quantity := range pod.Spec.Overhead {
if value, ok := limits[name]; ok && !value.IsZero() {
value.Add(quantity)
limits[name] = value
}
}
}
return
}
// addResourceList adds the resources in newList to list
func addResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
list[name] = value
}
}
}
// maxResourceList sets list to the greater of list/newList for every resource
// either list
func maxResourceList(list, new v1.ResourceList) {
for name, quantity := range new {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
continue
} else {
if quantity.Cmp(value) > 0 {
list[name] = quantity.DeepCopy()
}
}
}
func PodRequestsAndLimits(pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
opts := resourcehelper.PodResourcesOptions{}
return resourcehelper.PodRequests(pod, opts), resourcehelper.PodLimits(pod, opts)
}
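A minimal sketch of what the delegation buys (the pod below is invented for illustration; the wrapper simply forwards to the vendored component-helpers functions, which fold pod overhead into both the requests and the non-zero limits):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("50m")},
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
					Limits:   v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
				},
			}},
		},
	}
	opts := resourcehelper.PodResourcesOptions{}
	reqs, limits := resourcehelper.PodRequests(pod, opts), resourcehelper.PodLimits(pod, opts)
	fmt.Println(reqs.Cpu().String())   // 150m: 100m request + 50m overhead
	fmt.Println(limits.Cpu().String()) // 250m: 200m limit + 50m overhead (added only to non-zero limits)
}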
// PodToleratesTaints returns true if a pod tolerates one node's taints

View File

@@ -0,0 +1,373 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/component-base/config"
"k8s.io/utils/ptr"
storagev1 "k8s.io/api/storage/v1"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
apiv1alpha2 "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
)
// protectPodsWithPVCPolicy returns a descheduler policy that protects pods
// using PVCs of specific storage classes from eviction while, at the same
// time, evicting pods that have restarted more than 3 times.
func protectPodsWithPVCPolicy(namespace string, protectedsc []defaultevictor.ProtectedStorageClass) *apiv1alpha2.DeschedulerPolicy {
return &apiv1alpha2.DeschedulerPolicy{
Profiles: []apiv1alpha2.DeschedulerProfile{
{
Name: "ProtectPodsWithPVCPolicy",
PluginConfigs: []apiv1alpha2.PluginConfig{
{
Name: removepodshavingtoomanyrestarts.PluginName,
Args: runtime.RawExtension{
Object: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 3,
IncludingInitContainers: true,
Namespaces: &api.Namespaces{
Include: []string{namespace},
},
},
},
},
{
Name: defaultevictor.PluginName,
Args: runtime.RawExtension{
Object: &defaultevictor.DefaultEvictorArgs{
PodProtections: defaultevictor.PodProtections{
DefaultDisabled: []defaultevictor.PodProtection{
defaultevictor.PodsWithLocalStorage,
},
ExtraEnabled: []defaultevictor.PodProtection{
defaultevictor.PodsWithPVC,
},
Config: &defaultevictor.PodProtectionsConfig{
PodsWithPVC: &defaultevictor.PodsWithPVCConfig{
ProtectedStorageClasses: protectedsc,
},
},
},
},
},
},
},
Plugins: apiv1alpha2.Plugins{
Filter: apiv1alpha2.PluginSet{
Enabled: []string{
defaultevictor.PluginName,
},
},
Deschedule: apiv1alpha2.PluginSet{
Enabled: []string{
removepodshavingtoomanyrestarts.PluginName,
},
},
},
},
},
}
}
// TestProtectPodsWithPVC tests that pods using PVCs are protected.
func TestProtectPodsWithPVC(t *testing.T) {
ctx := context.Background()
initPluginRegistry()
cli, err := client.CreateClient(
config.ClientConnectionConfiguration{
Kubeconfig: os.Getenv("KUBECONFIG"),
}, "",
)
if err != nil {
t.Fatalf("error during kubernetes client creation with %v", err)
}
// start by finding out which storage class is the default in the
// cluster. If none is found, this test can't run.
scs, err := cli.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("error listing storage classes: %v", err)
}
var defclass *storagev1.StorageClass
for _, sc := range scs.Items {
if sc.Annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
defclass = &sc
break
}
}
if defclass == nil {
t.Fatalf("no default storage class found, unable to run the test")
}
// now we replicate the default storage class so we have two different
// storage classes in the cluster. this is useful to test protected vs
// unprotected pods using PVCs.
unprotectedsc := defclass.DeepCopy()
delete(unprotectedsc.Annotations, "storageclass.kubernetes.io/is-default-class")
unprotectedsc.ResourceVersion = ""
unprotectedsc.Name = "unprotected"
if _, err = cli.StorageV1().StorageClasses().Create(ctx, unprotectedsc, metav1.CreateOptions{}); err != nil {
t.Fatalf("error creating unprotected storage class: %v", err)
}
defer cli.StorageV1().StorageClasses().Delete(ctx, unprotectedsc.Name, metav1.DeleteOptions{})
// this is the namespace we are going to use for all testing
t.Logf("creating testing namespace %v", t.Name())
namespace := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("e2e-%s", strings.ToLower(t.Name())),
},
}
if _, err := cli.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", namespace.Name)
}
defer cli.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{})
for _, tc := range []struct {
name string
policy *apiv1alpha2.DeschedulerPolicy
enableGracePeriod bool
expectedEvictedPodCount uint
pvcs []*v1.PersistentVolumeClaim
volumes []v1.Volume
}{
{
name: "evict pods from unprotected storage class",
policy: protectPodsWithPVCPolicy(
namespace.Name, []defaultevictor.ProtectedStorageClass{
{
Name: defclass.Name,
},
},
),
expectedEvictedPodCount: 4,
pvcs: []*v1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-unprotected-claim",
Namespace: namespace.Name,
},
Spec: v1.PersistentVolumeClaimSpec{
StorageClassName: ptr.To(unprotectedsc.Name),
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
volumes: []v1.Volume{
{
Name: "test-unprotected-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "test-unprotected-claim",
},
},
},
},
},
{
name: "preserve pods from protected storage class",
policy: protectPodsWithPVCPolicy(
namespace.Name, []defaultevictor.ProtectedStorageClass{
{
Name: defclass.Name,
},
},
),
expectedEvictedPodCount: 0,
pvcs: []*v1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "test-protected-claim",
Namespace: namespace.Name,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
},
},
volumes: []v1.Volume{
{
Name: "test-protected-volume",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: "test-protected-claim",
},
},
},
},
},
} {
t.Run(tc.name, func(t *testing.T) {
t.Logf("creating testing pvcs in namespace %v", namespace.Name)
for _, pvc := range tc.pvcs {
if _, err = cli.CoreV1().PersistentVolumeClaims(namespace.Name).Create(ctx, pvc, metav1.CreateOptions{}); err != nil {
t.Fatalf("error creating PVC: %v", err)
}
defer cli.CoreV1().PersistentVolumeClaims(namespace.Name).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
}
deploy := buildTestDeployment(
"restart-pod",
namespace.Name,
4,
map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
func(deployment *appsv1.Deployment) {
deployment.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
deployment.Spec.Template.Spec.Containers[0].Args = []string{"-c", "sleep 1s && exit 1"}
},
)
deploy.Spec.Template.Spec.Volumes = tc.volumes
t.Logf("creating deployment %v", deploy.Name)
if _, err := cli.AppsV1().Deployments(deploy.Namespace).Create(ctx, deploy, metav1.CreateOptions{}); err != nil {
t.Fatalf("error creating deployment: %v", err)
}
defer cli.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{})
// wait for 3 restarts
waitPodRestartCount(ctx, cli, namespace.Name, t, 3)
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("unable to initialize server: %v\n", err)
}
rs.Client, rs.EventClient, rs.DefaultFeatureGates = cli, cli, initFeatureGates()
preRunNames := sets.NewString(getCurrentPodNames(ctx, cli, namespace.Name, t)...)
// deploy the descheduler with the configured policy
policycm, err := deschedulerPolicyConfigMap(tc.policy)
if err != nil {
t.Fatalf("Error creating %q CM: %v", policycm.Name, err)
}
t.Logf("creating %q policy CM with PodsWithPVC protection enabled...", policycm.Name)
if _, err = cli.CoreV1().ConfigMaps(policycm.Namespace).Create(
ctx, policycm, metav1.CreateOptions{},
); err != nil {
t.Fatalf("error creating %q CM: %v", policycm.Name, err)
}
defer func() {
t.Logf("deleting %q CM...", policycm.Name)
if err := cli.CoreV1().ConfigMaps(policycm.Namespace).Delete(
ctx, policycm.Name, metav1.DeleteOptions{},
); err != nil {
t.Fatalf("unable to delete %q CM: %v", policycm.Name, err)
}
}()
desdep := deschedulerDeployment(namespace.Name)
t.Logf("creating descheduler deployment %v", desdep.Name)
if _, err := cli.AppsV1().Deployments(desdep.Namespace).Create(
ctx, desdep, metav1.CreateOptions{},
); err != nil {
t.Fatalf("error creating %q deployment: %v", desdep.Name, err)
}
deschedulerPodName := ""
defer func() {
if deschedulerPodName != "" {
printPodLogs(ctx, t, cli, deschedulerPodName)
}
t.Logf("deleting %q deployment...", desdep.Name)
if err := cli.AppsV1().Deployments(desdep.Namespace).Delete(
ctx, desdep.Name, metav1.DeleteOptions{},
); err != nil {
t.Fatalf("unable to delete %q deployment: %v", desdep.Name, err)
}
waitForPodsToDisappear(ctx, t, cli, desdep.Labels, desdep.Namespace)
}()
t.Logf("waiting for the descheduler pod running")
deschedulerPods := waitForPodsRunning(ctx, t, cli, desdep.Labels, 1, desdep.Namespace)
if len(deschedulerPods) != 0 {
deschedulerPodName = deschedulerPods[0].Name
}
if err := wait.PollUntilContextTimeout(
ctx, 5*time.Second, time.Minute, true,
func(ctx context.Context) (bool, error) {
podList, err := cli.CoreV1().Pods(namespace.Name).List(
ctx, metav1.ListOptions{},
)
if err != nil {
t.Fatalf("error listing pods: %v", err)
}
names := []string{}
for _, item := range podList.Items {
names = append(names, item.Name)
}
currentRunNames := sets.NewString(names...)
actualEvictedPod := preRunNames.Difference(currentRunNames)
actualEvictedPodCount := uint(actualEvictedPod.Len())
if actualEvictedPodCount < tc.expectedEvictedPodCount {
t.Logf(
"expecting %v number of pods evicted, got %v instead",
tc.expectedEvictedPodCount, actualEvictedPodCount,
)
return false, nil
}
return true, nil
},
); err != nil {
t.Fatalf("error waiting for descheduler running: %v", err)
}
waitForTerminatingPodsToDisappear(ctx, t, cli, namespace.Name)
})
}
}

View File

@@ -29,10 +29,13 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clientgotesting "k8s.io/client-go/testing"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
utilptr "k8s.io/utils/ptr"
)
@@ -89,6 +92,25 @@ func BuildTestPDB(name, appLabel string) *policyv1.PodDisruptionBudget {
return pdb
}
func BuildTestPVC(name, storageClass string) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &storageClass,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}
return pvc
}
// BuildPodMetrics creates a test podmetrics with given parameters.
func BuildPodMetrics(name string, millicpu, mem int64) *v1beta1.PodMetrics {
return &v1beta1.PodMetrics{
@@ -230,11 +252,30 @@ func SetNormalOwnerRef(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = GetNormalPodOwnerRefList()
}
// SetMirrorPodAnnotation sets the given pod's annotations to mirror pod annotations
func SetMirrorPodAnnotation(pod *v1.Pod) {
pod.Annotations = GetMirrorPodAnnotation()
}
// SetPodPriority sets the given pod's priority
func SetPodPriority(pod *v1.Pod, priority int32) {
pod.Spec.Priority = &priority
}
// SetHostPathEmptyDirVolumeSource sets a single test volume combining hostPath and emptyDir
// sources, marking the pod as one that uses local storage.
func SetHostPathEmptyDirVolumeSource(pod *v1.Pod) {
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
}
// SetNodeUnschedulable sets the given node unschedulable
func SetNodeUnschedulable(node *v1.Node) {
node.Spec.Unschedulable = true
@@ -322,3 +363,18 @@ func PodWithPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) *v1.P
inputPod.Labels = map[string]string{labelKey: labelValue}
return inputPod
}
// RegisterEvictedPodsCollector prepends a reactor to the fake clientset that records the name of
// every pod for which an eviction is created, then falls through to the default reactors.
func RegisterEvictedPodsCollector(fakeClient *fake.Clientset, evictedPods *[]string) {
fakeClient.PrependReactor("create", "pods", func(action clientgotesting.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(clientgotesting.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policyv1.Eviction); matched {
*evictedPods = append(*evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
})
}
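A hedged usage sketch of the collector (the pod and namespace names are invented, and the helper is assumed to live in the repo's test utility package; the eviction error is ignored because the prepended reactor records the name before falling through to the default reactors):

package example

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/test"
)

// collectEvictions records which pod names had evictions issued against a fake clientset.
func collectEvictions(ctx context.Context) []string {
	client := fake.NewSimpleClientset()
	var evictedPods []string
	test.RegisterEvictedPodsCollector(client, &evictedPods)

	eviction := &policyv1.Eviction{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}
	_ = client.CoreV1().Pods("default").EvictV1(ctx, eviction) // error ignored in this sketch

	return evictedPods // contains "p1"
}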

13
vendor/k8s.io/component-helpers/resource/OWNERS generated vendored Normal file
View File

@@ -0,0 +1,13 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
approvers:
- api-approvers
reviewers:
- sig-node-reviewers
- sig-scheduling
labels:
- sig/node
- sig/scheduling
- kind/api-change

455
vendor/k8s.io/component-helpers/resource/helpers.go generated vendored Normal file
View File

@@ -0,0 +1,455 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// ContainerType signifies container type
type ContainerType int
const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
)
// PodResourcesOptions controls the behavior of PodRequests and PodLimits.
type PodResourcesOptions struct {
// Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits
// functions. All existing values in Reuse will be lost.
Reuse v1.ResourceList
// UseStatusResources indicates whether resources reported by the PodStatus should be considered
// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
// feature is not enabled.
UseStatusResources bool
// ExcludeOverhead controls if pod overhead is excluded from the calculation.
ExcludeOverhead bool
// ContainerFn is called with the effective resources required for each container within the pod.
ContainerFn func(res v1.ResourceList, containerType ContainerType)
// NonMissingContainerRequests if provided will replace any missing container level requests for the specified resources
// with the given values. If the requests for those resources are explicitly set, even if zero, they will not be modified.
NonMissingContainerRequests v1.ResourceList
// SkipPodLevelResources controls whether pod-level resources should be skipped
// from the calculation. If pod-level resources are not set in PodSpec,
// pod-level resources will always be skipped.
SkipPodLevelResources bool
// SkipContainerLevelResources controls whether container-level resources should be skipped
// from the calculation.
SkipContainerLevelResources bool
}
var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory)
func SupportedPodLevelResources() sets.Set[v1.ResourceName] {
return supportedPodLevelResources.Clone().Insert(v1.ResourceHugePagesPrefix)
}
// IsSupportedPodLevelResource checks if a given resource is supported by pod-level
// resource management through the PodLevelResources feature. Returns true if
// the resource is supported.
func IsSupportedPodLevelResource(name v1.ResourceName) bool {
return supportedPodLevelResources.Has(name) || strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}
// IsPodLevelResourcesSet checks if pod-level resources are set. It returns true if
// either the Requests or Limits map contains at least one supported pod-level resource.
func IsPodLevelResourcesSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}
if (len(pod.Spec.Resources.Requests) + len(pod.Spec.Resources.Limits)) == 0 {
return false
}
for resourceName := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
for resourceName := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
return false
}
// IsPodLevelRequestsSet checks if pod-level requests are set. It returns true if the
// Requests map is non-empty and contains at least one supported pod-level resource.
func IsPodLevelRequestsSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}
if len(pod.Spec.Resources.Requests) == 0 {
return false
}
for resourceName := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
return false
}
// IsPodLevelLimitsSet checks if pod-level limits are set. It returns true if
// Limits map is non-empty and contains at least one supported pod-level resource.
func IsPodLevelLimitsSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}
if len(pod.Spec.Resources.Limits) == 0 {
return false
}
for resourceName := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}
return false
}
// PodRequests computes the total pod requests per the PodResourcesOptions supplied.
// If PodResourcesOptions is nil, then the requests are returned including pod overhead.
// If the PodLevelResources feature is enabled AND the pod-level resources are set,
// those pod-level values are used in calculating Pod Requests.
// The computation is part of the API and must be reviewed as an API change.
func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
reqs := v1.ResourceList{}
if !opts.SkipContainerLevelResources {
reqs = AggregateContainerRequests(pod, opts)
}
if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) {
for resourceName, quantity := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
reqs[resourceName] = quantity
}
}
}
// Add overhead for running a pod to the sum of requests if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
addResourceList(reqs, pod.Spec.Overhead)
}
return reqs
}
// AggregateContainerRequests computes the total resource requests of all the containers
// in a pod. This computation follows the formula defined in the KEP for sidecar
// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for more details.
func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
reqs := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
}
for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = determineContainerReqs(pod, &container, cs)
}
}
if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, Containers)
}
addResourceList(reqs, containerReqs)
}
restartableInitContainerReqs := v1.ResourceList{}
initContainerReqs := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers {
containerReqs := container.Resources.Requests
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = determineContainerReqs(pod, &container, cs)
}
}
}
if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
// and add them to the resulting cumulative container requests
addResourceList(reqs, containerReqs)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerReqs, containerReqs)
containerReqs = restartableInitContainerReqs
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerReqs)
addResourceList(tmp, restartableInitContainerReqs)
containerReqs = tmp
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, InitContainers)
}
maxResourceList(initContainerReqs, containerReqs)
}
maxResourceList(reqs, initContainerReqs)
return reqs
}
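A worked example of the formula above (the pod shape and numbers are invented for illustration): with a restartable init container requesting 200m CPU, a regular init container requesting 500m, and one app container requesting 100m, steady state needs 200m + 100m = 300m while initialization peaks at 200m + 500m = 700m, so the aggregated request is max(300m, 700m) = 700m:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelper "k8s.io/component-helpers/resource"
	"k8s.io/utils/ptr"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				{
					// Restartable (sidecar) init container: keeps running alongside the app containers.
					Name:          "sidecar",
					RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways),
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")},
					},
				},
				{
					// Regular init container: only runs during initialization.
					Name: "init",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
					},
				},
			},
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
		},
	}
	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})
	fmt.Println(reqs.Cpu().String()) // 700m: max(sidecar+app = 300m, sidecar+init = 700m)
}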
// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
return max(cs.Resources.Requests, cs.AllocatedResources)
}
return max(container.Resources.Requests, cs.Resources.Requests, cs.AllocatedResources)
}
// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
return cs.Resources.Limits.DeepCopy()
}
return max(container.Resources.Limits, cs.Resources.Limits)
}
// IsPodResizeInfeasible returns true if the pod condition PodResizePending is set to infeasible.
func IsPodResizeInfeasible(pod *v1.Pod) bool {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodResizePending {
return condition.Reason == v1.PodReasonInfeasible
}
}
return false
}
// IsPodResizeDeferred returns true if the pod condition PodResizePending is set to deferred.
func IsPodResizeDeferred(pod *v1.Pod) bool {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodResizePending {
return condition.Reason == v1.PodReasonDeferred
}
}
return false
}
// applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values
func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList {
cp := v1.ResourceList{}
for k, v := range reqs {
cp[k] = v.DeepCopy()
}
for k, v := range nonMissing {
if _, found := reqs[k]; !found {
rk := cp[k]
rk.Add(v)
cp[k] = rk
}
}
return cp
}
// PodLimits computes the pod limits per the PodResourcesOptions supplied. If PodResourcesOptions is nil, then
// the limits are returned including pod overhead for any non-zero limits. The computation is part of the API and must be reviewed
// as an API change.
func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := AggregateContainerLimits(pod, opts)
if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) {
for resourceName, quantity := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
limits[resourceName] = quantity
}
}
}
// Add overhead to non-zero limits if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
for name, quantity := range pod.Spec.Overhead {
if value, ok := limits[name]; ok && !value.IsZero() {
value.Add(quantity)
limits[name] = value
}
}
}
return limits
}
// AggregateContainerLimits computes the aggregated resource limits of all the containers
// in a pod. This computation follows the formula defined in the KEP for sidecar
// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for more details.
func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
}
for _, container := range pod.Spec.Containers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerLimits = determineContainerLimits(pod, &container, cs)
}
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, Containers)
}
addResourceList(limits, containerLimits)
}
restartableInitContainerLimits := v1.ResourceList{}
initContainerLimits := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerLimits = determineContainerLimits(pod, &container, cs)
}
}
}
// Is the init container marked as a restartable init container?
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
addResourceList(limits, containerLimits)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerLimits, containerLimits)
containerLimits = restartableInitContainerLimits
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerLimits)
addResourceList(tmp, restartableInitContainerLimits)
containerLimits = tmp
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, InitContainers)
}
maxResourceList(initContainerLimits, containerLimits)
}
maxResourceList(limits, initContainerLimits)
return limits
}
// addResourceList adds the resources in newList to list.
func addResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
list[name] = value
}
}
}
// maxResourceList sets list to the greater of list/newList for every resource in newList
func maxResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
list[name] = quantity.DeepCopy()
}
}
}
// max returns the result of max(a, b...) for each named resource and is only used if we can't
// accumulate into an existing resource list
func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList {
var result v1.ResourceList
if a != nil {
result = a.DeepCopy()
} else {
result = v1.ResourceList{}
}
for _, other := range b {
maxResourceList(result, other)
}
return result
}
// reuseOrClearResourceList is a helper for avoiding excessive allocations of
// resource lists within the inner loop of resource calculations.
func reuseOrClearResourceList(reuse v1.ResourceList) v1.ResourceList {
if reuse == nil {
return make(v1.ResourceList, 4)
}
for k := range reuse {
delete(reuse, k)
}
return reuse
}

1
vendor/modules.txt vendored
View File

@@ -1300,6 +1300,7 @@ k8s.io/component-base/zpages/httputil
k8s.io/component-base/zpages/statusz
# k8s.io/component-helpers v0.34.0
## explicit; go 1.24.0
k8s.io/component-helpers/resource
k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
# k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f