mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

Compare commits


382 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
85b1d97dda Merge pull request #1810 from ingvagabund/refactorings
chore(pkg/descheduler): make TestPodEvictorReset table driven
2026-01-20 19:08:49 +05:30
Jan Chaloupka
b6aadc1643 chore(pkg/descheduler): make TestPodEvictorReset table driven 2026-01-20 12:51:58 +01:00
Kubernetes Prow Robot
c4ec31684f Merge pull request #1802 from ingvagabund/global-node-selector-as-indexer
feat: register a node indexer for the global node selector instead of listing nodes with the selector
2026-01-12 15:08:13 +05:30
Kubernetes Prow Robot
7d2c31cd39 Merge pull request #1808 from ingvagabund/profile-instance-id
feat(profile): inject a plugin instance ID to each built plugin
2026-01-09 15:33:43 +05:30
Jan Chaloupka
cf9edca33c feat(profile): inject a plugin instance ID to each built plugin 2026-01-06 12:26:35 +01:00
Kubernetes Prow Robot
f164943257 Merge pull request #1807 from ingvagabund/docs
doc(Design Decisions FAQ): Why doesn't the framework provide helpers for registering and retrieving indexers for plugins
2026-01-05 21:20:39 +05:30
Jan Chaloupka
1fe9e2c345 doc(Design Decisions FAQ): Why doesn't the framework provide helpers for registering and retrieving indexers for plugins 2026-01-05 16:10:39 +01:00
Kubernetes Prow Robot
16ccff8ed8 Merge pull request #1806 from ingvagabund/profile-refactoring
refactor(pkg/framework/profile): dedup unit test code
2026-01-05 15:12:37 +05:30
Jan Chaloupka
38f0f15787 chore: make gen 2026-01-04 20:23:13 +01:00
Jan Chaloupka
52f2aea444 refactor(pkg/framework/profile): add registerDefaultEvictor helper function 2026-01-04 19:43:47 +01:00
Jan Chaloupka
f3c63011cc refactor(pkg/framework/profile): add fake plugin registration helpers 2026-01-04 19:43:29 +01:00
Jan Chaloupka
47b939dd86 refactor(pkg/framework/profile): build a profile through a shared function to reduce code duplication 2026-01-04 19:42:30 +01:00
Kubernetes Prow Robot
89c88f483b Merge pull request #1800 from ingvagabund/readnodes-unit-test
fix(TestReadyNodesWithNodeSelector): make sure nodeLister.List always returns a non-empty list so the lister is always tested
2025-12-26 20:48:31 +05:30
Kubernetes Prow Robot
d558fa3a5b Merge pull request #1804 from ingvagabund/refactorings
refactor(plugins): simplify the way pods are created
2025-12-21 14:38:30 -08:00
Jan Chaloupka
7ef3673d4c refactor: inline single-statement apply functions in BuildTestPod calls 2025-12-21 21:41:59 +01:00
Jan Chaloupka
988e0b8868 refactor: replace pod.Spec.Volumes with test.SetHostPathEmptyDirVolumeSource in plugin tests 2025-12-21 21:36:24 +01:00
Jan Chaloupka
fc8ae3b4e8 refactor: replace pod.Spec.Priority with test.SetPodPriority in plugin tests 2025-12-21 21:36:05 +01:00
Kubernetes Prow Robot
6d7fedc982 Merge pull request #1803 from ingvagabund/refactor-defaultevictir-test
refactor(TestDefaultEvictor): de-dup code and use helpers
2025-12-20 06:54:31 -08:00
Jan Chaloupka
769ded35f1 make fmt 2025-12-20 15:24:37 +01:00
Jan Chaloupka
3283635149 refactor(defaultevictor_test): move newProtectedStorageClassesConfig to package level
Move the newProtectedStorageClassesConfig helper function from local scope
to package level so it can be reused by both TestDefaultEvictorFilter and
Test_protectedPVCStorageClasses, eliminating code duplication.
2025-12-20 15:17:06 +01:00
Jan Chaloupka
994ce3e2f7 refactor(TestDefaultEvictor): add setPodPVCVolumeWithFooClaimName helper function 2025-12-20 14:36:10 +01:00
Jan Chaloupka
90e4115b78 refactor(TestDefaultEvictor): add setPodLocalStorage helper function 2025-12-20 14:29:19 +01:00
Jan Chaloupka
8913d79d14 refactor(TestDefaultEvictor): replace mirror pod annotation assignments with test.SetMirrorPodAnnotation 2025-12-20 14:28:08 +01:00
Jan Chaloupka
9a5d7e8286 refactor(TestDefaultEvictor): replace system critical priority assignments with test.SetPodPriority 2025-12-20 14:27:18 +01:00
Jan Chaloupka
674e463bc2 refactor(TestDefaultEvictor): replace manual owner reference assignments with test utilities 2025-12-20 14:25:58 +01:00
Jan Chaloupka
1df3ef5030 refactor(TestDefaultEvictor): add setPodEvictAnnotation helper function 2025-12-20 14:22:28 +01:00
Jan Chaloupka
3068f8431a refactor(TestDefaultEvictor): add setPodNodeSelector helper function 2025-12-20 14:11:46 +01:00
Jan Chaloupka
dfd2b95d2d refactor(TestDefaultEvictor): add setNodeLabel helper function 2025-12-20 14:08:11 +01:00
Jan Chaloupka
3bb4529c34 refactor(TestDefaultEvictor): use test.SetNormalOwnerRef 2025-12-20 14:00:40 +01:00
Jan Chaloupka
b8765bd8ee refactor(TestDefaultEvictor): add setNodeTaint helper function 2025-12-20 13:54:50 +01:00
Jan Chaloupka
d666e4b830 refactor(TestDefaultEvictor): add buildTestPod helper function 2025-12-20 13:43:00 +01:00
Jan Chaloupka
08f733863e refactor(TestDefaultEvictor): add buildTestNode helper function 2025-12-20 13:25:39 +01:00
Jan Chaloupka
93a516a58a feat: register a node indexer for the global node selector instead of listing nodes with the selector
To avoid iterating through every node every time a list of nodes is
requested. This is prerequisite work for introducing profile-level
node selectors.
2025-12-19 23:25:24 +01:00
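A minimal Go sketch of the idea behind this change, assuming a client-go shared informer for nodes; the index name and the way the selector is handled here are illustrative, not the descheduler's actual implementation.

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"
)

// registerNodeSelectorIndexer adds an index that buckets nodes by whether they
// match the globally configured node selector, so matching nodes can later be
// fetched with ByIndex instead of iterating over every node on each request.
func registerNodeSelectorIndexer(factory informers.SharedInformerFactory, selector labels.Selector) error {
	return factory.Core().V1().Nodes().Informer().AddIndexers(cache.Indexers{
		"global-node-selector": func(obj interface{}) ([]string, error) {
			node, ok := obj.(*v1.Node)
			if !ok {
				return nil, nil
			}
			if selector.Matches(labels.Set(node.Labels)) {
				return []string{"match"}, nil
			}
			return nil, nil
		},
	})
}
```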
Jan Chaloupka
60da931e0e fix(TestReadyNodesWithNodeSelector): make sure nodeLister.List always returns a non-empty list so the lister is always tested
The case of an empty list of nodes from the lister is not easy to catch.
This change makes sure one more initial condition is met.
2025-12-18 17:06:25 +01:00
Kubernetes Prow Robot
12a9db4da0 Merge pull request #1798 from ingvagabund/contriburing-descheduler
doc: introduce contributing guidelines specific to the project
2025-12-15 12:43:47 -08:00
Jan Chaloupka
41da7497c3 doc: introduce contributing guidelines specific to the project
The document is to be extended over time
2025-12-15 21:11:50 +01:00
Kubernetes Prow Robot
b56f3cdae9 Merge pull request #1797 from ingvagabund/nodetaint-unit-test
refactor(TestDeletePodsViolatingNodeTaints): inline object creation
2025-12-15 09:51:45 -08:00
Jan Chaloupka
162a2d14b7 refactor(TestRemovePodsHavingTooManyRestarts): remove leftover comments 2025-12-15 18:20:38 +01:00
Jan Chaloupka
78788d72de refactor(node_taint_test): inline p15 2025-12-15 18:19:04 +01:00
Jan Chaloupka
956eeefede refactor(node_taint_test): inline p14 2025-12-15 18:18:59 +01:00
Jan Chaloupka
1f7bd1fba9 refactor(node_taint_test): inline p13 2025-12-15 18:18:56 +01:00
Jan Chaloupka
5fdf368593 refactor(node_taint_test): inline p12 2025-12-15 18:18:51 +01:00
Jan Chaloupka
50b6e178c1 refactor(node_taint_test): inline p11 2025-12-15 18:18:47 +01:00
Jan Chaloupka
c1ad532c46 refactor(node_taint_test): inline p10 2025-12-15 18:18:43 +01:00
Jan Chaloupka
7e40aae2dc refactor(node_taint_test): inline p9 2025-12-15 18:18:39 +01:00
Jan Chaloupka
e09bd976f5 refactor(node_taint_test): inline p8 2025-12-15 18:18:35 +01:00
Jan Chaloupka
ffb1f44144 refactor(node_taint_test): inline p7 2025-12-15 18:18:24 +01:00
Jan Chaloupka
cb595f2524 refactor(node_taint_test): inline p6 2025-12-15 18:17:34 +01:00
Jan Chaloupka
c46817f6df refactor(node_taint_test): inline p5 2025-12-15 18:17:30 +01:00
Jan Chaloupka
032db38d6c refactor(node_taint_test): inline p4 2025-12-15 18:17:26 +01:00
Jan Chaloupka
c1cd3ae794 refactor(node_taint_test): inline p3 2025-12-15 18:17:21 +01:00
Jan Chaloupka
060d9c8573 refactor(node_taint_test): inline p2 2025-12-15 18:17:17 +01:00
Jan Chaloupka
51bcf60ccf refactor(node_taint_test): inline p1 2025-12-15 18:17:12 +01:00
Jan Chaloupka
b472549cf6 refactor(node_taint_test): add withKubeSystemCriticalPod helper 2025-12-15 18:16:58 +01:00
Jan Chaloupka
c68e8a6d06 refactor(node_taint_test): add withTestTaintXToleration1 helper 2025-12-15 18:16:19 +01:00
Jan Chaloupka
68d9d4d044 refactor(node_taint_test): add datacenter label constants 2025-12-15 18:16:14 +01:00
Jan Chaloupka
452b1ff7d9 refactor(node_taint_test): add SetPodVolumes and withLocalStorageVolume helpers 2025-12-15 18:16:08 +01:00
Jan Chaloupka
f123f78b44 refactor: add SetSystemCriticalPriority helper function 2025-12-15 18:15:31 +01:00
Jan Chaloupka
ca0f7535fb refactor: add SetMirrorPodAnnotation helper function 2025-12-15 17:41:46 +01:00
Jan Chaloupka
78ff3fe92a refactor(node_taint_test): add withTestTaintToleration1 helper 2025-12-15 17:37:45 +01:00
Jan Chaloupka
0269283185 refactor(node_taint_test): add buildTestPodWithNormalOwnerRef helper 2025-12-15 17:34:18 +01:00
Jan Chaloupka
57ed329feb refactor(node_taint_test): inline node7 2025-12-15 17:26:04 +01:00
Jan Chaloupka
b96a41a745 refactor(node_taint_test): inline node6 2025-12-15 17:25:27 +01:00
Jan Chaloupka
6b6f7ba5c7 refactor(node_taint_test): inline node5 2025-12-15 17:24:53 +01:00
Jan Chaloupka
a3ca65ea14 refactor(node_taint_test): inline node4 2025-12-15 17:24:17 +01:00
Jan Chaloupka
d81580c93e refactor(node_taint_test): inline node3 2025-12-15 17:23:42 +01:00
Jan Chaloupka
0f7ff8a2b7 refactor(node_taint_test): inline node2 2025-12-15 17:23:08 +01:00
Jan Chaloupka
d27afd0319 refactor(node_taint_test): inline node1 2025-12-15 17:21:25 +01:00
Jan Chaloupka
3d48efdff4 refactor(node_taint_test): add dedicated functions for remaining nodes 2025-12-15 17:14:06 +01:00
Jan Chaloupka
e5d5cf2229 refactor(node_taint_test): create dedicated functions for taint configurations 2025-12-15 17:11:54 +01:00
Jan Chaloupka
f65209d4fa refactor(node_taint_test): inline addTaintsToNode 2025-12-15 17:08:50 +01:00
Jan Chaloupka
b9ceb9144f refactor(node_taint_test): remove default false settings for evict flags 2025-12-15 17:01:43 +01:00
Jan Chaloupka
2bbec0cbc6 refactor(node_taint_test): apply pod single creation convention for p15 2025-12-15 16:55:21 +01:00
Jan Chaloupka
a363da9806 refactor(node_taint_test): apply pod single creation convention for p14 2025-12-15 16:55:02 +01:00
Jan Chaloupka
63b3bd3b4d refactor(node_taint_test): apply pod single creation convention for p13 2025-12-15 16:54:44 +01:00
Jan Chaloupka
7fb935c650 refactor(node_taint_test): replace GetNormalPodOwnerRefList with SetNormalOwnerRef 2025-12-15 16:52:42 +01:00
Kubernetes Prow Robot
f85b2f8d4d Merge pull request #1796 from ingvagabund/nodeaffinity-unit-test
refactor(TestRemovePodsViolatingNodeAffinity): inline object creation
2025-12-15 07:45:47 -08:00
Jan Chaloupka
0580b5942c refactor(node_taint_test): apply pod single creation convention for p12 2025-12-15 16:44:46 +01:00
Jan Chaloupka
4171af7e8a refactor(node_taint_test): apply pod single creation convention for p11 2025-12-15 16:44:19 +01:00
Jan Chaloupka
a1678cd464 refactor(node_taint_test): apply pod single creation convention for p10 2025-12-15 16:44:01 +01:00
Jan Chaloupka
2f90d1dd01 refactor(node_taint_test): apply pod single creation convention for p9 2025-12-15 16:43:32 +01:00
Jan Chaloupka
f0cda32b6e refactor(node_taint_test): apply pod single creation convention for p8 2025-12-15 16:43:01 +01:00
Jan Chaloupka
43523113ff refactor(node_taint_test): apply pod single creation convention for p7 2025-12-15 16:42:34 +01:00
Jan Chaloupka
1b7889f4a3 refactor(node_taint_test): apply pod single creation convention for p6 2025-12-15 16:42:00 +01:00
Jan Chaloupka
b86315f097 refactor(node_taint_test): apply pod single creation convention for p5 2025-12-15 16:41:39 +01:00
Jan Chaloupka
0d496dfc5d refactor(node_taint_test): apply pod single creation convention for p4 2025-12-15 16:41:19 +01:00
Jan Chaloupka
d6b35eaed6 refactor(node_taint_test): apply pod single creation convention for p3 2025-12-15 16:40:41 +01:00
Jan Chaloupka
dc18f9f330 refactor(node_taint_test): apply pod single creation convention for p2 2025-12-15 16:39:54 +01:00
Jan Chaloupka
39212419e6 refactor(node_taint_test): apply pod single creation convention for p1 2025-12-15 16:38:35 +01:00
Jan Chaloupka
64f77ce6ee refactor(node_taint_test): apply node single creation convention for node7 2025-12-15 16:35:38 +01:00
Jan Chaloupka
ca5326c5c4 refactor(node_taint_test): apply node single creation convention for node6 2025-12-15 16:35:12 +01:00
Jan Chaloupka
9cf075ffc4 refactor(node_taint_test): apply node single creation convention for node5 2025-12-15 16:34:47 +01:00
Jan Chaloupka
3325fe0b8b refactor(node_taint_test): apply node single creation convention for node2 2025-12-15 16:33:28 +01:00
Jan Chaloupka
6c41ebd8f3 refactor(node_taint_test): apply node single creation convention for node1 2025-12-15 16:33:12 +01:00
Jan Chaloupka
ba034d6e0e refactor(node_taint_test): add node name constants 2025-12-15 16:23:38 +01:00
Jan Chaloupka
3289554f90 refactor(node_taint_test): add buildTestPod helper function 2025-12-15 16:20:01 +01:00
Jan Chaloupka
72575c2f23 refactor(node_taint_test): add buildTestNode helper function 2025-12-15 16:17:49 +01:00
Jan Chaloupka
07616c3fc0 refactor(TestRemovePodsHavingTooManyRestarts): make fmt 2025-12-15 16:14:50 +01:00
Jan Chaloupka
cad120881f refactor(TestRemovePodsViolatingNodeAffinity): apply pod single creation convention 2025-12-15 16:02:37 +01:00
Jan Chaloupka
aec4416099 refactor(TestRemovePodsViolatingNodeAffinity): add buildUnschedulableNodeWithLabels function 2025-12-15 15:59:01 +01:00
Jan Chaloupka
7b9d5d2539 refactor(TestRemovePodsViolatingNodeAffinity): inline nodeWithoutLabels 2025-12-15 15:50:25 +01:00
Jan Chaloupka
9f7629136f refactor(TestRemovePodsViolatingNodeAffinity): inline nodeWithLabels 2025-12-15 15:48:00 +01:00
Jan Chaloupka
42d255fd95 refactor(TestRemovePodsViolatingNodeAffinity): update addPodsToNode to accept nodeName 2025-12-15 15:33:58 +01:00
Jan Chaloupka
183a138d82 refactor(TestRemovePodsViolatingNodeAffinity): add constants for node names 2025-12-15 15:28:03 +01:00
Kubernetes Prow Robot
f669c45892 Merge pull request #1795 from ingvagabund/podantiaffinity-unit-test
refactor(TestPodAntiAffinity): inline object creation
2025-12-15 05:53:45 -08:00
Jan Chaloupka
a2ffbc1261 refactor(TestRemovePodsViolatingNodeAffinity): apply unit test convention for podWithNodeAffinity 2025-12-15 14:35:05 +01:00
Jan Chaloupka
2cda1bd89d refactor(TestRemovePodsViolatingNodeAffinity): deduplicate pod creation with buildTestPod helper 2025-12-15 14:35:03 +01:00
Jan Chaloupka
691a1da43b refactor(TestRemovePodsViolatingNodeAffinity): apply unit test convention for unschedulableNodeWithLabels 2025-12-15 14:34:59 +01:00
Jan Chaloupka
8fe74c7a0c refactor(TestRemovePodsViolatingNodeAffinity): apply unit test convention for nodeWithLabels 2025-12-15 14:34:54 +01:00
Jan Chaloupka
102bd6a91d refactor(TestRemovePodsViolatingNodeAffinity): deduplicate node creation with buildTestNode helper 2025-12-15 14:34:44 +01:00
Jan Chaloupka
3d1e15bb82 refactor(TestPodAntiAffinity): apply gofumpt formatting 2025-12-15 14:21:32 +01:00
Jan Chaloupka
3c02d9029c refactor(TestPodAntiAffinity): inline p1, p2, p4 builders 2025-12-15 14:13:51 +01:00
Jan Chaloupka
57a3e610a7 refactor(TestPodAntiAffinity): deduplicate p1, p3, p4 builders 2025-12-15 14:06:05 +01:00
Jan Chaloupka
7cec27d467 refactor(TestPodAntiAffinity): inline nonEvictablePod 2025-12-15 13:56:37 +01:00
Jan Chaloupka
688b45011a refactor(TestPodAntiAffinity): inline p11 2025-12-15 13:56:07 +01:00
Jan Chaloupka
a96451030c refactor(TestPodAntiAffinity): inline p10 2025-12-15 13:55:36 +01:00
Jan Chaloupka
a4930ebc83 refactor(TestPodAntiAffinity): inline p9 2025-12-15 13:55:11 +01:00
Jan Chaloupka
ad872f8b77 refactor(TestPodAntiAffinity): inline p8 2025-12-15 13:54:40 +01:00
Jan Chaloupka
a0654df270 refactor(TestPodAntiAffinity): inline p7 2025-12-15 13:54:07 +01:00
Jan Chaloupka
03b5a9a967 refactor(TestPodAntiAffinity): inline p6 2025-12-15 13:53:43 +01:00
Jan Chaloupka
9f2d22c1f7 refactor(TestPodAntiAffinity): inline p5 2025-12-15 13:53:19 +01:00
Jan Chaloupka
cbe1c1e559 refactor(TestPodAntiAffinity): inline p4 2025-12-15 13:52:21 +01:00
Jan Chaloupka
87182c5e8f refactor(TestPodAntiAffinity): inline p3 2025-12-15 13:51:58 +01:00
Jan Chaloupka
2765e31048 refactor(TestPodAntiAffinity): inline p2 2025-12-15 13:51:00 +01:00
Jan Chaloupka
87f675a2cd refactor(TestPodAntiAffinity): inline p1 2025-12-15 13:48:01 +01:00
Jan Chaloupka
a400a66d51 refactor(TestPodAntiAffinity): create dedicated builders for p1-p4 and nonEvictablePod 2025-12-15 13:43:11 +01:00
Jan Chaloupka
fa427a2b37 refactor(TestPodAntiAffinity): deduplicate setting Labels for foo1-bar1 2025-12-15 13:38:58 +01:00
Jan Chaloupka
90672630da refactor(TestPodAntiAffinity): deduplicate setting Labels for foo-bar 2025-12-15 13:37:55 +01:00
Jan Chaloupka
6a00214457 refactor(TestPodAntiAffinity): deduplicate setPodAntiAffinity for foo1-bar1 2025-12-15 13:36:27 +01:00
Jan Chaloupka
9413b0c654 refactor(TestPodAntiAffinity): deduplicate setPodAntiAffinity for foo-bar 2025-12-15 13:34:05 +01:00
Jan Chaloupka
3072a59ea0 refactor(TestPodAntiAffinity): ensure nonEvictablePod is created only through apply argument 2025-12-15 13:25:04 +01:00
Jan Chaloupka
0e56823865 refactor(TestPodAntiAffinity): ensure p11 is created only through apply argument 2025-12-15 13:24:09 +01:00
Jan Chaloupka
ea80f7d307 refactor(TestPodAntiAffinity): ensure p10 is created only through apply argument 2025-12-15 13:23:46 +01:00
Jan Chaloupka
6638b976ad refactor(TestPodAntiAffinity): ensure p9 is created only through apply argument 2025-12-15 13:23:10 +01:00
Jan Chaloupka
116385718f refactor(TestPodAntiAffinity): ensure p8 is created only through apply argument 2025-12-15 13:22:35 +01:00
Jan Chaloupka
5ad695166a refactor(TestPodAntiAffinity): ensure p7 is created only through apply argument 2025-12-15 13:22:12 +01:00
Jan Chaloupka
d5e0ec597f refactor(TestPodAntiAffinity): ensure p6 is created only through apply argument 2025-12-15 13:21:32 +01:00
Jan Chaloupka
4b86cdd31a refactor(TestPodAntiAffinity): ensure p5 is created only through apply argument 2025-12-15 13:20:53 +01:00
Jan Chaloupka
99527292e0 refactor(TestPodAntiAffinity): ensure p4 is created only through apply argument 2025-12-15 13:20:06 +01:00
Jan Chaloupka
cf79af6fba refactor(TestPodAntiAffinity): ensure p3 is created only through apply argument 2025-12-15 13:19:38 +01:00
Jan Chaloupka
da55c779f2 refactor(TestPodAntiAffinity): ensure p2 is created only through apply argument 2025-12-15 13:18:50 +01:00
Kubernetes Prow Robot
bc6500d917 Merge pull request #1794 from ingvagabund/toomanyrestarts-unit-test
refactor(TestRemovePodsHavingTooManyRestarts): inline object creation
2025-12-15 03:59:45 -08:00
Jan Chaloupka
c5b9debe56 refactor(TestPodAntiAffinity): ensure p1 is created only through apply argument 2025-12-15 12:57:19 +01:00
Jan Chaloupka
18f847bbe8 refactor(TestPodAntiAffinity): create buildTestPodForNode1 to deduplicate nodeName1 2025-12-15 12:54:42 +01:00
Jan Chaloupka
6e753ac5fb refactor(TestPodAntiAffinity): create buildTestPod helper to deduplicate 100 and 0 literals 2025-12-15 12:52:02 +01:00
Jan Chaloupka
b797ca6ba2 refactor(TestPodAntiAffinity): inline node2, node3, node4, and node5 2025-12-15 12:48:50 +01:00
Jan Chaloupka
4ffabad669 refactor(TestPodAntiAffinity): create buildTestNode1 and inline node1 2025-12-15 12:45:07 +01:00
Jan Chaloupka
bba62ccb93 refactor(TestPodAntiAffinity): extract setNodeMainRegionLabel helper 2025-12-15 12:40:46 +01:00
Jan Chaloupka
1f856595f5 refactor(TestPodAntiAffinity): add nodeName constants
refactor(TestPodAntiAffinity): replace node.Name with nodeName constants
2025-12-15 12:36:23 +01:00
Jan Chaloupka
993162dd44 refactor(TestPodAntiAffinity): replace test.BuildTestNode with buildTestNode helper 2025-12-15 12:30:30 +01:00
Jan Chaloupka
ee73336fd8 refactor(TestRemovePodsHavingTooManyRestarts): inline node5 2025-12-15 12:25:03 +01:00
Jan Chaloupka
75f655e271 refactor(TestRemovePodsHavingTooManyRestarts): inline node4 2025-12-15 12:24:46 +01:00
Jan Chaloupka
76895273f9 refactor(TestRemovePodsHavingTooManyRestarts): inline node3 2025-12-15 12:11:09 +01:00
Jan Chaloupka
35d2103fcf refactor(TestRemovePodsHavingTooManyRestarts): inline node2 2025-12-15 12:10:34 +01:00
Jan Chaloupka
b069ae009a refactor(TestRemovePodsHavingTooManyRestarts): inline node1 2025-12-15 12:08:58 +01:00
Jan Chaloupka
be275deea5 refactor(TestRemovePodsHavingTooManyRestarts): node3 as a constant 2025-12-15 12:06:47 +01:00
Jan Chaloupka
a5d3241a54 refactor(TestRemovePodsHavingTooManyRestarts): replace test.BuildTestNode with buildTestNode helpers 2025-12-15 12:05:44 +01:00
Jan Chaloupka
2af9ea8449 refactor(TestRemovePodsHavingTooManyRestarts): remove applyFunc and apply modifications in initPods 2025-12-15 12:04:20 +01:00
Jan Chaloupka
60fa5aa228 refactor(TestRemovePodsHavingTooManyRestarts): create all the pods as part of a unit test definition 2025-12-15 12:04:10 +01:00
Jan Chaloupka
a94d22fd1b refactor(TestRemovePodsHavingTooManyRestarts): create all testing pods under initPods 2025-12-15 12:03:59 +01:00
Jan Chaloupka
8c70b02088 refactor(TestRemovePodsHavingTooManyRestarts): single testing pods creation 2025-12-15 12:03:22 +01:00
Jan Chaloupka
ec58fed521 refactor(TestRemovePodsHavingTooManyRestarts): create each init container through a single invocation 2025-12-15 12:03:08 +01:00
Jan Chaloupka
bf9cf0ee1c refactor(TestRemovePodsHavingTooManyRestarts): use test.Set...OwnerRef instead 2025-12-15 11:03:45 +01:00
Jan Chaloupka
6ebb0b7aa7 refactor(TestRemovePodsHavingTooManyRestarts): extract setPodContainerStatusRestartCount helper 2025-12-15 00:23:03 +01:00
Kubernetes Prow Robot
bb01360776 Merge pull request #1793 from ingvagabund/duplicates-unit-test
refactor(TestRemoveDuplicates): reduce test code duplication
2025-12-14 08:35:44 -08:00
Jan Chaloupka
c8bc668e04 refactor(TestRemoveDuplicatesUniformly): reduce duplication in setTolerations 2025-12-14 17:05:42 +01:00
Jan Chaloupka
b64426888b refactor(TestRemoveDuplicatesUniformly): reduce duplication in setWorkerLabelSelector 2025-12-14 17:05:21 +01:00
Jan Chaloupka
1306cf38a1 refactor(TestRemoveDuplicatesUniformly): reduce duplication in setNotMasterNodeSelector 2025-12-14 17:04:18 +01:00
Jan Chaloupka
bc06d1be83 refactor: replace test.BuildTestPod with buildTestPodForNode 2025-12-14 16:40:55 +01:00
Jan Chaloupka
c9e87bb97d Merge pull request #1792 from ingvagabund/duplicates-unit-test
refactor(TestFindDuplicatePods): reduce duplicates and inline
2025-12-14 17:27:11 +02:00
Jan Chaloupka
05b6d5e343 refactor(TestFindDuplicatePods): remove leftover comments 2025-12-14 16:16:24 +01:00
Jan Chaloupka
044f75dcec refactor(TestFindDuplicatePods): inline node6 creation 2025-12-14 16:16:22 +01:00
Jan Chaloupka
6e62af3dbf refactor(TestFindDuplicatePods): inline node5 creation 2025-12-14 16:16:20 +01:00
Jan Chaloupka
2fac727be3 refactor(TestFindDuplicatePods): inline node4 creation 2025-12-14 16:16:17 +01:00
Jan Chaloupka
babc4137a4 refactor(TestFindDuplicatePods): inline node3 creation 2025-12-14 16:16:12 +01:00
Jan Chaloupka
fc033caf21 refactor(TestFindDuplicatePods): inline node2 creation 2025-12-14 16:16:10 +01:00
Jan Chaloupka
fd524f2172 refactor(TestFindDuplicatePods): inline node1 creation 2025-12-14 16:07:27 +01:00
Jan Chaloupka
47275831ab refactor(TestFindDuplicatePods): apply buildTestNode helper to node variables
refactor(TestRemoveDuplicatesUniformly): apply buildTestNode helper
2025-12-14 16:00:50 +01:00
Jan Chaloupka
b8b0fa0565 refactor(TestFindDuplicatePods): inline p20 pod creation 2025-12-14 14:27:05 +01:00
Jan Chaloupka
daaa5896a9 refactor(TestFindDuplicatePods): inline p19 pod creation 2025-12-14 14:26:23 +01:00
Jan Chaloupka
e27864717d refactor(TestFindDuplicatePods): inline p17 pod creation 2025-12-14 14:25:26 +01:00
Jan Chaloupka
e8cf01591e refactor(TestFindDuplicatePods): inline p16 pod creation 2025-12-14 14:24:36 +01:00
Jan Chaloupka
d7766cccfd refactor(TestFindDuplicatePods): inline p15 pod creation 2025-12-14 14:22:22 +01:00
Jan Chaloupka
3ebffe5a86 refactor(TestFindDuplicatePods): inline p13 pod creation 2025-12-14 14:20:48 +01:00
Jan Chaloupka
4e758c18e8 refactor(TestFindDuplicatePods): inline p12 pod creation 2025-12-14 14:19:19 +01:00
Jan Chaloupka
1c494f9c44 refactor(TestFindDuplicatePods): inline p11 pod creation 2025-12-14 14:17:37 +01:00
Jan Chaloupka
45dfe3011c refactor(TestFindDuplicatePods): inline p10 pod creation 2025-12-14 14:11:13 +01:00
Jan Chaloupka
eeb459d6d4 refactor(TestFindDuplicatePods): inline p9 pod creation 2025-12-14 14:10:18 +01:00
Jan Chaloupka
f3d91fc69f refactor(TestFindDuplicatePods): inline p8 pod creation 2025-12-14 14:04:23 +01:00
Jan Chaloupka
e9dcd4e54d refactor(TestFindDuplicatePods): inline p7 pod creation 2025-12-14 14:03:22 +01:00
Jan Chaloupka
8490ed9c8f refactor(TestFindDuplicatePods): inline p6 pod creation 2025-12-14 14:01:21 +01:00
Jan Chaloupka
01fb826bd3 refactor(TestFindDuplicatePods): inline p5 pod creation 2025-12-14 13:58:38 +01:00
Jan Chaloupka
9b50aa91f8 refactor(TestFindDuplicatePods): inline p4 pod creation 2025-12-14 13:44:24 +01:00
Jan Chaloupka
7a5bf8c2f0 refactor(TestFindDuplicatePods): inline p3 pod creation 2025-12-14 13:43:34 +01:00
Jan Chaloupka
df06442830 refactor(TestFindDuplicatePods): inline p2 pod creation 2025-12-14 13:39:56 +01:00
Jan Chaloupka
180548cc1a refactor(TestFindDuplicatePods): inline p1 pod creation 2025-12-14 13:37:23 +01:00
Jan Chaloupka
0aee6cff48 refactor(TestFindDuplicatePods): introduce buildTestPodWithRSOwnerRefWithNamespaceForNode1 helper 2025-12-14 13:33:56 +01:00
Jan Chaloupka
7a0257a682 refactor(TestFindDuplicatePods): introduce buildTestPodWithRSOwnerRefForNode1 helper 2025-12-14 13:25:40 +01:00
Jan Chaloupka
f5253faeb0 refactor(TestFindDuplicatePods): set owner refs through a dedicated function 2025-12-14 13:25:34 +01:00
Jan Chaloupka
59f499e2cd refactor(TestFindDuplicatePods): replace direct ownerRef assignment with test.SetRSOwnerRef 2025-12-14 13:25:14 +01:00
Jan Chaloupka
008265db9b refactor(TestFindDuplicatePods): consolidate ownerRef as all the cases produce the same owner reference 2025-12-14 13:25:09 +01:00
Jan Chaloupka
61190b805b refactor(TestFindDuplicatePods): buildTestPodForNode1 for default testing configuration 2025-12-14 13:24:03 +01:00
Jan Chaloupka
8c83840bf9 Merge pull request #1791 from ingvagabund/duplicates-unit-test
refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits
2025-12-14 14:05:17 +02:00
Jan Chaloupka
e46b5db6d5 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:35 +01:00
Jan Chaloupka
b21fb4a655 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:35 +01:00
Jan Chaloupka
8f3c5f4978 refactor(TestFindDuplicatePods): drop unused variable 2025-12-14 12:49:27 +01:00
Jan Chaloupka
6f94e19385 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:27 +01:00
Jan Chaloupka
3bb99512d8 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:27 +01:00
Jan Chaloupka
56f49bc78f refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:49:27 +01:00
Jan Chaloupka
800dd280cd refactor(TestFindDuplicatePods): drop unused variable 2025-12-14 12:49:17 +01:00
Jan Chaloupka
8dada79593 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:46:05 +01:00
Jan Chaloupka
660e2dba40 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:46:03 +01:00
Jan Chaloupka
294ce39231 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:46:01 +01:00
Jan Chaloupka
f2031ddcb0 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:59 +01:00
Jan Chaloupka
7435b5d474 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:56 +01:00
Jan Chaloupka
b5f177efa0 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:54 +01:00
Jan Chaloupka
4a4ec4afb7 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:52 +01:00
Jan Chaloupka
0c33be962d refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:50 +01:00
Jan Chaloupka
511ed214b0 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:48 +01:00
Jan Chaloupka
3d4263bf5e refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:45 +01:00
Jan Chaloupka
96171413ba refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:43 +01:00
Jan Chaloupka
5578211253 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:38 +01:00
Jan Chaloupka
08c2fc7621 refactor(TestFindDuplicatePods): have a pod fully created through BuildTestPod without any edits 2025-12-14 12:45:35 +01:00
Kubernetes Prow Robot
9e45259399 Merge pull request #1790 from ingvagabund/podlifetime-unit-tests
refactor(TestPodLifeTime): split the unit tests into smaller semantically close groups
2025-12-14 02:19:43 -08:00
Jan Chaloupka
e5bbedb602 refactor(TestPodLifeTime): extract generic filtering tests 2025-12-13 14:27:13 +01:00
Jan Chaloupka
2710fd3781 refactor(TestPodLifeTime): extract evictor configuration tests 2025-12-13 14:06:24 +01:00
Jan Chaloupka
2658864ac0 refactor(TestPodLifeTime): extract eviction limits tests 2025-12-13 14:06:18 +01:00
Jan Chaloupka
e05de87368 refactor(TestPodLifeTime): extract pod status reason tests 2025-12-13 14:06:10 +01:00
Jan Chaloupka
293a9ca4b7 refactor(TestPodLifeTime): extract container waiting reason tests 2025-12-13 14:06:04 +01:00
Jan Chaloupka
83151219e7 refactor(TestPodLifeTime): extract pod phase state tests 2025-12-13 14:05:18 +01:00
Jan Chaloupka
fb0bddf85d refactor(TestPodLifeTime): extract age threshold tests 2025-12-13 14:04:26 +01:00
Jan Chaloupka
286f2848fc refactor(TestPodLifeTime): add shared test infrastructure 2025-12-13 14:03:21 +01:00
Jan Chaloupka
d8d997a25d refactor(TestPodLifeTime): extract helper functions to package level 2025-12-13 13:58:26 +01:00
Kubernetes Prow Robot
5d7a483dc8 Merge pull request #1789 from ingvagabund/refactorings
feat(PodLifeTime): document the plugin with details that can be used for reasoning during reviews and design discussions
2025-12-12 04:54:12 -08:00
Jan Chaloupka
58076dd162 feat(PodLifeTime): document the plugin with details that can be used for
reasoning during reviews and design discussions
2025-12-12 11:55:27 +01:00
Kubernetes Prow Robot
b6e81fdd4b Merge pull request #1787 from ingvagabund/refactorings
feat(TestPodLifeTime): check only expected pods are evicted
2025-12-11 11:37:31 -08:00
Jan Chaloupka
59dfd041a8 feat(TestPodLifeTime): check only expected pods are evicted 2025-12-11 17:08:25 +01:00
Kubernetes Prow Robot
7bf29ce56d Merge pull request #1785 from ingvagabund/refactorings
refactor(TestPodLifeTime): update unit test names and simplify pod creation
2025-12-11 04:35:30 -08:00
Jan Chaloupka
c77f1a4ed2 refactor(TestPodLifeTime): update test names to better correspond to their purpose 2025-12-11 13:04:33 +01:00
Jan Chaloupka
7e14c6c7c4 refactor(TestPodLifeTime): drop applyPodsFunc function 2025-12-11 13:03:47 +01:00
Jan Chaloupka
b4a0b8dbac fix(TestPodLifeTime): rename dev ns to default 2025-12-11 13:03:40 +01:00
Kubernetes Prow Robot
680b10099d Merge pull request #1784 from ingvagabund/refactorings
refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates
2025-12-10 09:15:41 -08:00
Jan Chaloupka
e92dda1a37 Merge pull request #1783 from ingvagabund/refactorings
refactor(TestPodLifeTime): consolidations, simplifications and node instance for each unit test
2025-12-10 17:38:21 +02:00
Jan Chaloupka
07dc0c61c5 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:33:28 +01:00
Jan Chaloupka
cab310e55c refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:32:54 +01:00
Jan Chaloupka
822a1d4c40 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:32:30 +01:00
Jan Chaloupka
1d7368b58d refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:32:05 +01:00
Jan Chaloupka
70a71f54bc refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:31:31 +01:00
Jan Chaloupka
3ea0eadcb3 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:31:25 +01:00
Jan Chaloupka
41a0a9c994 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:30:30 +01:00
Jan Chaloupka
c707f53cec refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 16:28:20 +01:00
Jan Chaloupka
9be42e50cc refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:55:12 +01:00
Jan Chaloupka
bed39d70f0 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:54:04 +01:00
Jan Chaloupka
8a0fd10315 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:53:19 +01:00
Jan Chaloupka
5e6cd6057b refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:52:24 +01:00
Jan Chaloupka
b857869371 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:47:08 +01:00
Jan Chaloupka
3e764eb564 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:45:53 +01:00
Jan Chaloupka
2648749eb8 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:45:10 +01:00
Jan Chaloupka
ff43002060 refactor(TestPodLifeTime): inline pod creation in each unit test to avoid accidental pod spec updates 2025-12-10 15:43:07 +01:00
Jan Chaloupka
4f42a7ae9b refactor(TestPodLifeTime): consolidate defaults when building a test pod with RS owner reference 2025-12-10 15:34:48 +01:00
Jan Chaloupka
7d84b68556 refactor(TestPodLifeTime): consolidate defaults when building a test pod 2025-12-10 15:29:12 +01:00
Jan Chaloupka
5b4719634c refactor(TestPodLifeTime): the default pod namespace will work the same way as the 'dev' one 2025-12-10 15:29:03 +01:00
Jan Chaloupka
94a0fbdcbb refactor(TestPodLifeTime): inline node creation in each unit test to avoid accidental node spec updates 2025-12-10 15:28:57 +01:00
Jan Chaloupka
bbc3eef1c9 refactor(TestPodLifeTime): replace test.GetReplicaSetOwnerRefList with test.SetRSOwnerRef
To make the assignment shorter and unified
2025-12-10 15:28:51 +01:00
Jan Chaloupka
3a3e72e9c2 refactor(TestPodLifeTime): consolidate all owner references
test.GetReplicaSetOwnerRefList produces the same value every time it's
invoked.
2025-12-10 15:28:45 +01:00
Kubernetes Prow Robot
e6c14a365f Merge pull request #1782 from ingvagabund/refactorings
refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits
2025-12-10 06:23:30 -08:00
Kubernetes Prow Robot
2b2ab0b9ad Merge pull request #1781 from ingvagabund/podlifetime-unittest-dry
refactor(TestPodLifeTime): remove ineffective owner references assignments
2025-12-10 05:39:30 -08:00
Jan Chaloupka
16b9311e9e refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:23 +01:00
Jan Chaloupka
1a61470e81 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:14 +01:00
Jan Chaloupka
c02779b6a5 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:09 +01:00
Jan Chaloupka
ff6363692c refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:04 +01:00
Jan Chaloupka
34540c3c95 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:13:00 +01:00
Jan Chaloupka
ee40f7ff30 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:55 +01:00
Jan Chaloupka
cece2ee3cc refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:50 +01:00
Jan Chaloupka
fbdf86fdfd refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:39 +01:00
Jan Chaloupka
7bfd4088ce refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:12:05 +01:00
Jan Chaloupka
18f61b5e64 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:11:12 +01:00
Jan Chaloupka
769b4fe34a refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:11:07 +01:00
Jan Chaloupka
6ffc7e3975 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:11:03 +01:00
Jan Chaloupka
31af0d8223 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:10:59 +01:00
Jan Chaloupka
0c80f3689d refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:10:53 +01:00
Jan Chaloupka
9722018847 refactor(TestPodLifeTime): have a pod fully created through BuildTestPod without any edits 2025-12-10 14:10:48 +01:00
Jan Chaloupka
47cfdf7057 refactor(TestPodLifeTime): remove ineffective owner references assignments
Pods p5 and p6 already have an owner assigned. Also,
test.GetReplicaSetOwnerRefList() produces the same list of owner references.
2025-12-10 14:08:06 +01:00
Kubernetes Prow Robot
db6d460677 Merge pull request #1764 from abelfodil/master
Fix "Current requires cgo or $USER set in environment" error
2025-11-15 00:25:38 -08:00
Anes Belfodil
237d9c1a7b fix: provide USER env var to correctly initialize tracing
This is done to prevent "Current requires cgo or $USER set in environment" error during
tracing initialization.
2025-11-15 01:40:08 -05:00
Kubernetes Prow Robot
5b66733ada Merge pull request #1772 from Sycrosity/master
docs: fix README.md link to kubernetes bot commands
2025-11-07 20:52:51 -08:00
Kubernetes Prow Robot
eb1b91d085 Merge pull request #1773 from petersalas/update-readme
docs: fix incorrect gracePeriodSeconds default in README.md
2025-11-07 20:22:52 -08:00
Peter Salas
058056d965 docs: fix incorrect gracePeriodSeconds default in README.md 2025-11-04 12:11:02 -08:00
Sycrosity
f9aa969791 docs: fix link to kubernetes bot commands page 2025-11-03 00:29:17 +00:00
Sycrosity
4bbfa08dfb docs: Have kustomize suggested commands use latest release 2025-11-03 00:25:22 +00:00
Kubernetes Prow Robot
4b7c2c90ea Merge pull request #1771 from a7i/native-sidecar
fix: pod resource calculation to consider native sidecars
2025-11-02 02:06:02 -08:00
Amir Alavi
06cab8e2aa fix: pod resource calculation to consider native sidecars
previously, descheduler code had copied an old version of PodRequestsAndLimits which does not consider native sidecars
it will now rely on resourcehelper libs, which will continue to get upstream updates

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-11-01 10:48:00 -04:00
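A simplified Go sketch of why native sidecars matter for the calculation: init containers with restartPolicy: Always keep running alongside app containers, so their requests belong in the long-running sum. This only illustrates the accounting rule; it deliberately omits the sequential init-container maximum and pod overhead that the upstream resource helper handles.

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// longRunningCPURequest sums the CPU requests of all regular containers plus
// native sidecars (init containers with restartPolicy: Always), which keep
// running for the whole pod lifetime, unlike ordinary init containers.
func longRunningCPURequest(pod *v1.Pod) *resource.Quantity {
	total := resource.NewQuantity(0, resource.DecimalSI)
	for _, c := range pod.Spec.Containers {
		if cpu, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
			total.Add(cpu)
		}
	}
	for _, c := range pod.Spec.InitContainers {
		// Only restartable init containers (native sidecars) stay running.
		if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
			if cpu, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
				total.Add(cpu)
			}
		}
	}
	return total
}
```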
Kubernetes Prow Robot
582641c2e9 Merge pull request #1752 from ricardomaraschini/create-protection-for-pods-using-storage-class
feat: enable pod protection based on storage classes
2025-10-30 14:18:03 -07:00
Kubernetes Prow Robot
4d78cd49a0 Merge pull request #1770 from a7i/descheduler-chart-v0.34.0
[v0.34.0] update helm chart
2025-10-30 06:56:05 -07:00
Amir Alavi
ce56624cea [v0.34.0] update helm chart 2025-10-29 22:02:10 -04:00
Kubernetes Prow Robot
dd7b76f2c4 Merge pull request #1768 from a7i/v0.34.0-docs-manifests
[v0.34.0] update docs and manifests
2025-10-29 17:54:01 -07:00
Amir Alavi
bc4f17194b [v0.34.0] update docs and manifests
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-10-29 12:20:31 -04:00
Ricardo Maraschini
d9d6ca64e9 feat: enable pod protection based on storage classes
this commit introduces a new customization on the existing PodsWithPVC
protection. this new customization allows users to make pods that refer
to a given storage class unevictable.

for example, to protect pods referring to `storage-class-0` and
`storage-class-1` this configuration can be used:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
  pluginConfig:
  - name: "DefaultEvictor"
    args:
      podProtections:
        extraEnabled:
        - PodsWithPVC
        config:
          PodsWithPVC:
            protectedStorageClasses:
            - name: storage-class-0
            - name: storage-class-1
```

changes introduced by this pr:

1. the descheduler starts to observe persistent volume claims.
1. a new api field was introduced to allow per pod protection config.
1. rbac had to be adjusted (+persistentvolumeclaims).
2025-10-29 11:21:10 +01:00
Kubernetes Prow Robot
ebaf155e23 Merge pull request #1765 from googs1025/helm_chart
docs: use podProtections args in helm chart
2025-10-29 01:02:01 -07:00
googs1025
781572fed5 use podProtections args
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-10-28 10:29:52 +08:00
Kubernetes Prow Robot
e3503d22f4 Merge pull request #1751 from rstribrn/fix/#1750_chart_livenessprobe
helm: Fix liveness probe timeout for descheduler chart
2025-10-21 12:00:39 -07:00
Kubernetes Prow Robot
564c2c29d8 Merge pull request #1759 from ricardomaraschini/add-myself-as-approver
chore: update approvers
2025-10-21 08:02:43 -07:00
Kubernetes Prow Robot
bbb915e003 Merge pull request #1744 from a7i/amir/k8s-1.34
[v0.34.0] bump to kubernetes 1.34 deps
2025-10-21 07:32:47 -07:00
Amir Alavi
1db6b615d1 [v0.34.0] bump to kubernetes 1.34 deps
Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-10-21 09:14:13 -04:00
Kubernetes Prow Robot
e9188852ef Merge pull request #1762 from tiraboschi/test_1.34
test: run by default with kind 0.30.0 and kubevirt v1.6.2
2025-10-21 05:28:40 -07:00
Simone Tiraboschi
c36466bb1c test: run by default with kind 0.30.0 and kubevirt v1.6.2
bump kind and kubevirt versions to unblock the k8s 1.34
e2e test lane.

Signed-off-by: Simone Tiraboschi <stirabos@redhat.com>
2025-10-21 13:44:34 +02:00
Ricardo Maraschini
841fd29282 chore: update approvers
add myself as an approver on this repository.
2025-10-17 14:59:21 +02:00
Kubernetes Prow Robot
79b2e04199 Merge pull request #1753 from tiraboschi/annotate_eviction_requests
feat(eviction): add annotations to eviction requests for observability
2025-10-13 04:26:57 -07:00
Simone Tiraboschi
3a608a590a feat(eviction): add annotations to eviction requests for observability
Although eviction requests (policy/v1) are not persisted long term,
their API still implements the full metav1.ObjectMeta struct. While
name and namespace refer to the pod being evicted, eviction requests
can still carry annotations.

This change adds annotations to descheduler-initiated evictions,
including the requester, reason, and the strategy or plugin that
triggered them.

While these details are already logged by the descheduler, exposing them
as annotations allows external webhooks or controllers to provide
clearer context about each eviction request, both for tracking and
prioritization purposes.

Signed-off-by: Simone Tiraboschi <stirabos@redhat.com>
2025-10-13 12:49:03 +02:00
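A minimal Go sketch of the pattern described above, using client-go's EvictV1 helper; the annotation keys shown are placeholders, not the exact keys the descheduler adds.

```go
package main

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictWithContext sends a policy/v1 eviction request carrying annotations so
// that webhooks or controllers intercepting the request can see who asked for
// the eviction and why. The annotation keys below are illustrative placeholders.
func evictWithContext(ctx context.Context, client kubernetes.Interface, namespace, podName, plugin, reason string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			Annotations: map[string]string{
				"example.io/evicted-by": "descheduler", // placeholder key
				"example.io/plugin":     plugin,        // placeholder key
				"example.io/reason":     reason,        // placeholder key
			},
		},
	}
	return client.CoreV1().Pods(namespace).EvictV1(ctx, eviction)
}
```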
Kubernetes Prow Robot
07b1c4e681 Merge pull request #1755 from tsj-30/fix/handle-single-node
descheduler: handle single-node clusters gracefully
2025-10-13 01:36:56 -07:00
S J Tharun
7d6f6fedec fix gofmt error 2025-10-13 08:29:27 +05:30
S J Tharun
3033aec6a0 descheduler: handle single-node clusters gracefully 2025-10-11 07:53:41 +05:30
Rostislav Stříbrný
9eb582cd67 helm: Fix liveness probe timeout for descheduler chart 2025-09-30 14:52:40 +02:00
Kubernetes Prow Robot
925b388702 Merge pull request #1746 from eminaktas/fix/correct-function-comment
fix: correct comment for HaveNoEvictionAnnotation
2025-09-30 03:10:18 -07:00
Kubernetes Prow Robot
e599018adb Merge pull request #1747 from ricardomaraschini/fix-context-cancellation
chore: give tracing.Shutdown() its own context
2025-09-25 07:54:18 -07:00
Ricardo Maraschini
f9a3be8cde chore: give tracing.Shutdown() its own context
tracing.Shutdown() uses the context, so we must guarantee that the
context we use is valid regardless of whether the original context has
already been cancelled.

this change introduces an exclusive context for the shutdown process
with an arbitrary timeout.
2025-09-24 12:37:54 +02:00
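A minimal Go sketch of this pattern; the shutdown function is passed in as a parameter rather than assuming the exact tracing.Shutdown signature. The point is that the shutdown context is derived from context.Background() with its own timeout instead of from the (possibly already cancelled) run context.

```go
package main

import (
	"context"
	"time"
)

// shutdownWithOwnContext flushes a shutdown hook with a fresh context so that
// cancellation of the main run context does not abort the flush prematurely.
func shutdownWithOwnContext(shutdown func(context.Context) error) error {
	// Independent context with an arbitrary timeout, detached from the run context.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return shutdown(ctx)
}
```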
Emin Aktas
d47e077897 fix: correct comment for HaveNoEvictionAnnotation 2025-09-20 12:53:56 +03:00
Kubernetes Prow Robot
483c9c1499 Merge pull request #1741 from anthosz/deploy-annotation
helm chart - Allow to add custom annotations for deployment
2025-09-18 12:24:11 -07:00
Anthony SCHWARTZ
d841a2d913 helm chart - Allow to add custom annotations for deployment
Revamp code
2025-09-17 22:27:18 +02:00
Kubernetes Prow Robot
f7f86ed075 Merge pull request #1743 from ingvagabund/test-e2e-new-args
feat(test/e2e): introduce new flags to override RunAsUser and RunAsGroup security context setting
2025-09-17 02:54:13 -07:00
Kubernetes Prow Robot
46f55b5221 Merge pull request #1742 from ricardomaraschini/add-pull-request-template
chore: add pull request template
2025-09-17 01:28:13 -07:00
Jan Chaloupka
fa9fa70ed7 feat(test/e2e): introduce new flags to override RunAsUser and RunAsGroup security context setting
So RunAsUser and RunAsGroup can be either omitted or set to different values
2025-09-17 10:23:57 +02:00
Ricardo Maraschini
43dc9a9616 chore: add pull request template
adds a list of things to be checked by the pr creator and reviewers.
2025-09-16 12:12:15 +02:00
Kubernetes Prow Robot
f8c8d9a385 Merge pull request #1736 from n2h9/chore-1732-chart-add-cronjob-job-annotations-and-labels
[1732] chore: add cronjob/job labels and annotations to helm chart
2025-08-29 09:09:11 -07:00
Nikita B
b5bb15ae10 [1732] chore: add cronjob/job labels and annotations to helm chart
Signed-off-by: Nikita B <n2h9z4@gmail.com>
2025-08-23 14:49:28 +02:00
Kubernetes Prow Robot
94d1825c68 Merge pull request #1725 from googs1025/chore/evictionConstraints
standardize protectionEnabled param across eviction constraints
2025-08-19 05:53:36 -07:00
googs1025
257bd55909 standardize protectionEnabled param 2025-08-19 17:52:54 +08:00
Kubernetes Prow Robot
fd2febbfe1 Merge pull request #1641 from jmederer/master
feat(token): automountServiceAccountToken set hard to true on deployment
2025-08-15 05:05:11 -07:00
Kubernetes Prow Robot
63ecaec0ef Merge pull request #1729 from googs1025/validate/improve
Improves the validation logic in the plugins
2025-08-15 03:31:07 -07:00
Kubernetes Prow Robot
4740bc3e27 Merge pull request #1728 from googs1025/chore/podprotections/readme
update readme for PodProtections
2025-08-15 02:35:07 -07:00
googs1025
8f521bb6f7 update readme for PodProtections
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-08-15 13:26:25 +08:00
Jan Mederer
d641488ea1 feat(token): automountServiceAccountToken - The Option can now be controlled via values.yaml or VolumeMounts
Signed-off-by: Jan Mederer <jan@mederer.it>
2025-08-14 08:34:55 +02:00
Kubernetes Prow Robot
e5c57a759b Merge pull request #1603 from googs1025/feature/add_dra_arg
feature: add PodsWithResourceClaims parameter in DefaultEvictorArgs PodProtections
2025-08-13 06:57:10 -07:00
googs1025
9c7e01de67 feature: add PodsWithResourceClaims parameter in DefaultEvictorArgs PodProtections
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-08-13 20:35:36 +08:00
Kubernetes Prow Robot
eb4c7d102f Merge pull request #1733 from googs1025/namesapce_includes
add ValidateHighNodeUtilizationPluginConfig unit test
2025-08-13 05:03:08 -07:00
googs1025
e5ea03ce75 add ValidateHighNodeUtilizationPluginConfig unit test
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-08-13 17:15:21 +08:00
googs1025
2cce141feb Improves the validation logic in the plugins
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-08-07 17:21:01 +08:00
Kubernetes Prow Robot
f2211e1cef Merge pull request #1665 from googs1025/refator/evict_arg
add PodProtections for DefaultEvictorArgs
2025-08-05 02:59:40 -07:00
googs1025
d9697591d5 add EvictionProtection field for DefaultEvictorArgs
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-08-04 19:34:16 +08:00
Kubernetes Prow Robot
419fe74702 Merge pull request #1723 from ingvagabund/default-evictor-no-eviction-policy
Default evictor no eviction policy
2025-07-31 05:13:16 -07:00
Jan Chaloupka
7380aa6e0a DefaultEvictor: introduce no-eviction policy
NoEvictionPolicy dictates whether a no-eviction policy is preferred or mandatory.
It needs to be used with caution, as it gives users the ability to protect their pods
from eviction, which might work against enforced policies, e.g. plugins evicting pods
violating security policies.
2025-07-24 15:47:05 +02:00
Kubernetes Prow Robot
b84b2623b9 Merge pull request #1722 from ingvagabund/minor-refactoring
Test code refactorings
2025-07-24 06:38:28 -07:00
Jan Chaloupka
d0548b75d7 TestSortPodsBasedOnPriorityLowToHigh: check the whole sorted list of pods 2025-07-24 12:48:03 +02:00
Jan Chaloupka
6e9d8891c5 defaultevictor_test.go: replace descheduler.alpha.kubernetes.io/evict literal with evictPodAnnotationKey const 2025-07-24 12:48:00 +02:00
Jan Chaloupka
5cc9e68127 Drop assignment of default test values 2025-07-24 12:04:07 +02:00
Kubernetes Prow Robot
bf6a51f733 Merge pull request #1660 from schahal/chore/run-policies-thru-tpl
feat(helm): run descedulerPolicy thru tpl func for more chart control
2025-07-17 02:30:24 -07:00
Kubernetes Prow Robot
f15d8d0d54 Merge pull request #1647 from dongjiang1989/fix-secure-port-0-panic
fix: Fix panic in descheduler when using `--secure-port=0`
2025-07-15 03:54:22 -07:00
Kubernetes Prow Robot
a177744169 Merge pull request #1719 from a7i/amir/fix-sort-evictable
fix: removepodsviolatingtopologyspreadconstraint to favor evictable pods when balancing domains
2025-07-15 00:48:24 -07:00
Amir Alavi
eadfe4a546 fix: topologyspreadconstraint plugin to not add PodNodeAffinity unless the inclusion policy is honor 2025-07-10 17:42:57 -04:00
Amir Alavi
fbf11df729 fix: topologyspreadconstraint to prefer evictable before sorting domains
Sort pods in domains that are above the ideal average based on whether they fit on other nodes that are below the average

Signed-off-by: Amir Alavi <amiralavi7@gmail.com>
2025-07-10 17:42:12 -04:00
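A small Go sketch of sorting evictable pods ahead of non-evictable ones before trimming a domain; the isEvictable predicate is a stand-in for the plugin's actual filter.

```go
package main

import (
	"sort"

	v1 "k8s.io/api/core/v1"
)

// sortEvictableFirst reorders pods so that pods passing the evictability filter
// come first, which lets the balancing logic pick evictable pods before it ever
// has to consider protected ones. sort.SliceStable keeps the original order
// within each group.
func sortEvictableFirst(pods []*v1.Pod, isEvictable func(*v1.Pod) bool) {
	sort.SliceStable(pods, func(i, j int) bool {
		return isEvictable(pods[i]) && !isEvictable(pods[j])
	})
}
```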
Kubernetes Prow Robot
e5ab156a99 Merge pull request #1717 from ricardomaraschini/remove-unecessary-assignments
chore: stop with no-op default evictor settings
2025-07-09 18:21:27 -07:00
Ricardo Maraschini
6e714a2134 chore: stop with no-op default evictor settings
we have been carrying these no-op assignments for quite a while now. we should
only set defaults when they are different from what is being provided
by the user.
2025-07-07 15:31:49 +02:00
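A tiny Go sketch of the defaulting style being advocated: only fill in a value when the user left the field unset, instead of unconditionally re-assigning defaults. The args type and field are hypothetical.

```go
package main

// evictorArgs is a hypothetical stand-in for a plugin's argument struct.
type evictorArgs struct {
	// NodeFit being nil means the user did not set it.
	NodeFit *bool
}

// setDefaults only touches fields the user left unset; it never overwrites an
// explicit user choice with a no-op re-assignment of the same default.
func setDefaults(args *evictorArgs) {
	if args.NodeFit == nil {
		def := false
		args.NodeFit = &def
	}
}
```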
Kubernetes Prow Robot
a01fa87de8 Merge pull request #1709 from googs1025/add/field
add activeDeadlineSeconds field for cronjob
2025-06-30 07:12:32 -07:00
googs1025
ba694cfac1 add activeDeadlineSeconds field for cronjob
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-06-30 21:45:44 +08:00
Kubernetes Prow Robot
2570bedd52 Merge pull request #1708 from ingvagabund/minor-adjustments
logger: Align with the previous logger verbosity
2025-06-25 00:56:29 -07:00
Jan Chaloupka
89eab59d82 logger: Align with the previous logger verbosity 2025-06-25 09:27:57 +02:00
Kubernetes Prow Robot
ace001c618 Merge pull request #1655 from googs1025/feature/contextual_logging
feature: use contextual logging for plugins
2025-06-25 00:16:29 -07:00
googs1025
33894afe2b feature: use contextual logging for plugins
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-06-13 19:45:55 +08:00
Kubernetes Prow Robot
528aff2d42 Merge pull request #1705 from ricardomaraschini/allow-prometheus-over-http
feat(prometheus): allow different url schemes
2025-06-12 03:44:55 -07:00
Ricardo Maraschini
a2b899aa15 feat(prometheus): allow different url schemes
as per the prometheus golang client implementation, the only url validation
done is by means of an `url.Parse()` call. we should do the same and not
enforce the usage of the https scheme.

our readme even shows an example of descheduler config using http
prometheus url scheme.
2025-06-12 11:23:09 +02:00
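A minimal Go sketch of the relaxed validation described above: parse the URL and only reject clearly malformed values rather than insisting on https. The function name is illustrative.

```go
package main

import (
	"fmt"
	"net/url"
)

// validatePrometheusURL accepts any scheme that url.Parse can handle (http,
// https, ...), mirroring what the prometheus client itself tolerates.
func validatePrometheusURL(raw string) error {
	u, err := url.Parse(raw)
	if err != nil {
		return fmt.Errorf("invalid prometheus url %q: %w", raw, err)
	}
	if u.Scheme == "" || u.Host == "" {
		return fmt.Errorf("prometheus url %q must include a scheme and host", raw)
	}
	return nil
}
```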
Kubernetes Prow Robot
a1ddb3f28f Merge pull request #1232 from dragon-flyings/metrics
metrics name refactor
2025-06-11 09:44:56 -07:00
lowang-bh
b63b09089e metrics name refactor and mark old one deprecated
Signed-off-by: lowang-bh <lhui_wang@163.com>
2025-06-02 14:13:46 +08:00
Kubernetes Prow Robot
2d7528411a Merge pull request #1696 from doctapp/PodLifeTimeAllStates
*1677 Allow Succeeded and Failed states in PodLifeTime
2025-05-25 12:32:35 -07:00
Martin Tapp
e06443ef40 *1677 Allow Succeeded and Failed states in PodLifeTime 2025-05-23 15:56:37 -04:00
Kubernetes Prow Robot
9f918371a2 Merge pull request #1694 from ingvagabund/plugins-new-context
Extend plugin's New with a context.Context
2025-05-19 05:31:14 -07:00
Kubernetes Prow Robot
c8912acfb7 Merge pull request #1693 from googs1025/constraints_refactor
refactor: separate eviction constraints to constraints.go
2025-05-19 03:31:15 -07:00
Jan Chaloupka
1974c12e0f Extend plugin's New with a context.Context
The new context.Context can later be used for passing a contextualized
logger, or for other initialization steps that require the context.
2025-05-19 12:23:44 +02:00
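An illustrative Go sketch of a plugin constructor that accepts a context; the framework's actual builder signature may differ, so treat the types below as placeholders. Threading the context through New lets the constructor pull a contextual logger (or deadlines) out of it instead of relying on globals.

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// pluginHandle is a placeholder standing in for the framework's handle interface.
type pluginHandle interface{}

type examplePlugin struct {
	logger klog.Logger
}

// newExamplePlugin extracts a contextual logger during construction.
func newExamplePlugin(ctx context.Context, _ pluginHandle) (*examplePlugin, error) {
	return &examplePlugin{logger: klog.FromContext(ctx)}, nil
}
```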
googs1025
b3aeca73db refactor: separate eviction constraints to constraints.go
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-05-19 18:02:41 +08:00
Kubernetes Prow Robot
d34848086c Merge pull request #1691 from googs1025/fix/listall
fix(example): list only active pod
2025-05-12 10:55:14 -07:00
Kubernetes Prow Robot
9aa6d79c21 Merge pull request #1688 from ingvagabund/plugins-taints-do-not-list-all-pods
RemovePodsViolatingNodeTaints: list only pods that are not failed/succeeded
2025-05-12 06:55:18 -07:00
googs1025
7a76d9f0d3 fix(RemovePodsViolatingNodeTaints): list only active pod
Signed-off-by: googs1025 <googs1025@gmail.com>
2025-05-12 21:45:57 +08:00
Kubernetes Prow Robot
71746262b1 Merge pull request #1684 from googs1025/refactor_topology
chore: move namespaces filtering logic to New()
2025-05-11 12:29:14 -07:00
Kubernetes Prow Robot
8b0ae7ce52 Merge pull request #1686 from googs1025/add_sort
feature: sort pods by restarts count in RemovePodsHavingTooManyRestarts plugin
2025-05-11 08:49:15 -07:00
Jan Chaloupka
6691720da5 RemovePodsViolatingNodeTaints: list only pods that are not failed/succeeded
Listing pods was incorrectly changed to listing all pods during code
refactoring.
2025-05-10 21:12:06 +02:00
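A short Go sketch of restricting a pod list to active pods with a field selector, assuming a plain client-go List call; the descheduler's own listing helpers may differ.

```go
package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// listActivePodsOnNode lists pods bound to a node while excluding pods that
// already finished (Succeeded or Failed), so taint violations are only
// evaluated for pods that are still running or pending.
func listActivePodsOnNode(ctx context.Context, client kubernetes.Interface, nodeName string) (*v1.PodList, error) {
	selector, err := fields.ParseSelector(
		"spec.nodeName=" + nodeName +
			",status.phase!=" + string(v1.PodSucceeded) +
			",status.phase!=" + string(v1.PodFailed))
	if err != nil {
		return nil, err
	}
	return client.CoreV1().Pods("").List(ctx, metav1.ListOptions{FieldSelector: selector.String()})
}
```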
googs1025
0a691debfb feature: sort pods by restarts count in RemovePodsHavingTooManyRestarts plugin 2025-05-09 13:15:38 +08:00
googs1025
fbc875fac1 chore: move namespaces filtering logic to New() 2025-05-07 19:47:30 +08:00
Kubernetes Prow Robot
e466307d7c Merge pull request #1681 from googs1025/use_priorityclasslister
optimize: NodeFit function by reordering checks for performance
2025-05-07 01:01:16 -07:00
dongjiang
9fed73148c Merge branch 'kubernetes-sigs:master' into fix-secure-port-0-panic 2025-05-06 10:06:37 +08:00
googs1025
957c5bc8e0 optimize: NodeFit function by reordering checks for performance 2025-05-05 21:09:14 +08:00
dongjiang
5d4dc6604a Update run-e2e-tests.sh 2025-04-30 21:51:12 +08:00
Satbir Chahal
603473839a chore(helm): run descedulerPolicy thru tpl func for more chart control 2025-04-03 05:30:17 -07:00
dongjiang
581d997379 fix panic
Signed-off-by: dongjiang <dongjiang1989@126.com>
2025-03-12 18:04:33 +08:00
1386 changed files with 108269 additions and 43743 deletions

.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,22 @@
## Description
<!-- Please include a summary of the change and which issue is fixed -->
## Checklist
Please ensure your pull request meets the following criteria before submitting it
for review. These items will be used by reviewers to assess the quality and
completeness of your changes:
- [ ] **Code Readability**: Is the code easy to understand, well-structured, and consistent with project conventions?
- [ ] **Naming Conventions**: Are variable, function, and struct names descriptive and consistent?
- [ ] **Code Duplication**: Is there any repeated code that should be refactored?
- [ ] **Function/Method Size**: Are functions/methods short and focused on a single task?
- [ ] **Comments & Documentation**: Are comments clear, useful, and not excessive? Were comments updated where necessary?
- [ ] **Error Handling**: Are errors handled appropriately?
- [ ] **Testing**: Are there sufficient unit/integration tests?
- [ ] **Performance**: Are there any obvious performance issues or unnecessary computations?
- [ ] **Dependencies**: Are new dependencies justified?
- [ ] **Logging & Monitoring**: Is logging used appropriately (not too verbose, not too silent)?
- [ ] **Backward Compatibility**: Does this change break any existing functionality or APIs?
- [ ] **Resource Management**: Are resources (files, connections, memory) managed and released properly?
- [ ] **PR Description**: Is the PR description clear, providing enough context and explaining the motivation for the change?
- [ ] **Documentation & Changelog**: Are README and docs updated if necessary?


@@ -7,11 +7,11 @@ jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.33.0"]
descheduler-version: ["v0.33.0"]
k8s-version: ["v1.34.0"]
descheduler-version: ["v0.34.0"]
descheduler-api: ["v1alpha2"]
manifest: ["deployment"]
kind-version: ["v0.27.0"] # keep in sync with test/run-e2e-tests.sh
kind-version: ["v0.30.0"] # keep in sync with test/run-e2e-tests.sh
runs-on: ubuntu-latest
steps:
- name: Checkout Repo


@@ -0,0 +1,30 @@
# Descheduler Design Constraints
This is a slowly growing document that lists good practices, conventions, and design decisions.
## Overview
TBD
## Code convention
* *formatting code*: run `make fmt` before committing each change to avoid CI failures
## Unit Test Conventions
These are the known conventions that are useful to practice whenever reasonable:
* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
* *no object instance duplication*: avoid duplication by not creating two objects with the same passed values in two different places, e.g. two nodes created with the same memory, CPU, and pod requests. Instead, create a single function wrapping test.BuildTestNode and invoke this wrapper multiple times.
The aim is to reduce cognitive load when reading and debugging the test code.
## Design Decisions FAQ
This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.
### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?
In general, each plugin can have many indexers—for example, for nodes, namespaces, pods, and other resources. Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.


@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.24.2
FROM golang:1.24.6
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .

OWNERS (1 line changed)

@@ -4,6 +4,7 @@ approvers:
- seanmalloy
- a7i
- knelasevero
- ricardomaraschini
reviewers:
- damemi
- seanmalloy

README.md (125 lines changed)

@@ -33,11 +33,12 @@ but relies on the default scheduler for that.
## ⚠️ Documentation Versions by Release
If you are using a published release of Descheduler (such as
`registry.k8s.io/descheduler/descheduler:v0.33.0`), follow the documentation in
`registry.k8s.io/descheduler/descheduler:v0.34.0`), follow the documentation in
that version's release branch, as listed below:
|Descheduler Version|Docs link|
|---|---|
|v0.34.x|[`release-1.34`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.34/README.md)|
|v0.33.x|[`release-1.33`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.33/README.md)|
|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
@@ -93,17 +94,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.34' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.34' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.34' | kubectl apply -f -
```
## User Guide
@@ -128,7 +129,7 @@ These are top level keys in the Descheduler Policy that you can use to configure
| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) |
| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
| `gracePeriodSeconds` | `int` | `nil` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. |
| `prometheus` |`object`| `nil` | Configures collection of Prometheus metrics for actual resource utilization |
| `prometheus.url` |`string`| `nil` | Points to a Prometheus server url |
| `prometheus.authToken` |`object`| `nil` | Sets the Prometheus server authentication token. If not specified, the in-cluster authentication token from the container's file system is read. |
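For illustration, here is a minimal sketch of a policy combining several of the top-level keys above. All concrete values, and the `source: KubernetesMetrics` spelling under `metricsProviders`, are assumptions chosen only for demonstration:
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
# Wait 60 seconds before evicted pods are deleted; 0 means delete immediately,
# and leaving the key unset falls back to the type's default grace period.
gracePeriodSeconds: 60
# Emit an event whenever an eviction attempt fails.
evictionFailureEventNotification: true
# Collect actual utilization from the Kubernetes Metrics Server.
metricsProviders:
  - source: KubernetesMetrics
profiles: []   # strategy profiles omitted in this sketch
```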
@@ -148,20 +149,70 @@ In general, each plugin can consume metrics from a different provider so multipl
The Default Evictor Plugin is used by default for filtering pods before processing them in a strategy plugin, or for applying a PreEvictionFilter to pods before eviction. You can also create your own Evictor Plugin or use the default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate, or group pods by different criteria, which is why this is handled by a plugin and not configured in the top-level config.
| Name |type| Default Value | Description |
|---------------------------|----|---------------|-----------------------------------------------------------------------------------------------------------------------------|
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
| `evictDaemonSetPods` | bool | false | allows eviction of DaemonSet managed Pods. |
| `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
| `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
| `labelSelector` |`metav1.LabelSelector`|| (see [label filtering](#label-filtering)) |
| `priorityThreshold` |`priorityThreshold`|| (see [priority filtering](#priority-filtering)) |
| `nodeFit` |`bool`|`false`| (see [node fit filtering](#node-fit-filtering)) |
| `minReplicas` |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
| `minPodAge` |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
| `ignorePodsWithoutPDB` |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored |
| Name | Type | Default Value | Description |
|---------------------------|------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `nodeSelector` | `string` | `nil` | Limits the nodes that are processed. |
| `evictLocalStoragePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithLocalStorage"` instead]**<br>Allows eviction of pods using local storage. |
| `evictDaemonSetPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"DaemonSetPods"` instead]**<br>Allows eviction of DaemonSet managed Pods. |
| `evictSystemCriticalPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"SystemCriticalPods"` instead]**<br>[Warning: Will evict Kubernetes system pods] Allows eviction of pods with any priority, including system-critical pods like kube-dns. |
| `ignorePvcPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithPVC"` instead]**<br>Sets whether PVC pods should be evicted or ignored. |
| `evictFailedBarePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"FailedBarePods"` instead]**<br>Allows eviction of pods without owner references and in a failed phase. |
| `ignorePodsWithoutPDB` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithoutPDB"` instead]**<br>Sets whether pods without PodDisruptionBudget should be evicted or ignored. |
| `labelSelector` | `metav1.LabelSelector` | | (See [label filtering](#label-filtering)) |
| `priorityThreshold` | `priorityThreshold` | | (See [priority filtering](#priority-filtering)) |
| `nodeFit` | `bool` | `false` | (See [node fit filtering](#node-fit-filtering)) |
| `minReplicas` | `uint` | `0` | Ignores eviction of pods where the owner (e.g., `ReplicaSet`) replicas are below this threshold. |
| `minPodAge` | `metav1.Duration` | `0` | Ignores eviction of pods with a creation time within this threshold. |
| `noEvictionPolicy` | `enum` | `` | Sets whether the `descheduler.alpha.kubernetes.io/prefer-no-eviction` pod annotation is considered preferred or mandatory (see the example below). Accepted values: "", "Preferred", "Mandatory". Defaults to "Preferred". |
| `podProtections` | `PodProtections` | `{}` | Holds the list of enabled and disabled protection pod policies.<br>Users can selectively disable certain default protection rules or enable extra ones. See below for supported values. |
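For example, a sketch of a `DefaultEvictor` configuration that treats the `prefer-no-eviction` annotation as mandatory (the profile name is a placeholder):
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          # Pods annotated with descheduler.alpha.kubernetes.io/prefer-no-eviction
          # are now excluded from eviction rather than merely preferred to stay.
          noEvictionPolicy: "Mandatory"
          nodeFit: true
```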
#### Supported Values for `podProtections.DefaultDisabled`
> Setting a value in `defaultDisabled` **disables the corresponding default protection rule**. This means the specified type of Pods will **no longer be protected** from eviction and may be evicted if they meet other criteria.
| Value | Meaning |
|--------------------------|-------------------------------------------------------------------------|
| `"PodsWithLocalStorage"` | Allow eviction of Pods using local storage. |
| `"DaemonSetPods"` | Allow eviction of DaemonSet-managed Pods. |
| `"SystemCriticalPods"` | Allow eviction of system-critical Pods. |
| `"FailedBarePods"` | Allow eviction of failed bare Pods (without controllers). |
---
#### Supported Values for `podProtections.ExtraEnabled`
> Setting a value in `extraEnabled` **enables an additional protection rule**. This means the specified type of Pods will be **protected** from eviction.
| Value | Meaning |
|----------------------------|------------------------------------------------------------------|
| `"PodsWithPVC"` | Prevents eviction of Pods using Persistent Volume Claims (PVCs). |
| `"PodsWithoutPDB"` | Prevents eviction of Pods without a PodDisruptionBudget (PDB). |
| `"PodsWithResourceClaims"` | Prevents eviction of Pods using ResourceClaims. |
#### Protecting pods using specific Storage Classes
With the `PodsWithPVC` protection enabled, all pods using PVCs are protected from eviction by default. If needed, you can restrict the protection to specific PVC storage classes; in that case only pods using PVCs with the specified storage classes are protected from eviction. For example:
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
extraEnabled:
- PodsWithPVC
config:
PodsWithPVC:
protectedStorageClasses:
- name: storage-class-0
- name: storage-class-1
```
This example will protect pods using PVCs with storage classes `storage-class-0` and `storage-class-1` from eviction.
### Example policy
@@ -193,9 +244,17 @@ profiles:
pluginConfig:
- name: "DefaultEvictor"
args:
evictSystemCriticalPods: true
evictFailedBarePods: true
evictLocalStoragePods: true
podProtections:
defaultDisabled:
#- "PodsWithLocalStorage"
#- "SystemCriticalPods"
#- "DaemonSetPods"
#- "FailedBarePods"
extraEnabled:
#- "PodsWithPVC"
#- "PodsWithoutPDB"
#- "PodsWithResourceClaims"
config: {}
nodeFit: true
minReplicas: 2
plugins:
@@ -727,7 +786,9 @@ profiles:
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
You can also specify the `states` parameter to **only** evict pods matching the following conditions (see the example after this list):
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
> The primary purpose for using states like `Succeeded` and `Failed` is releasing resources so that new pods can be rescheduled.
> I.e., the main motivation is not for cleaning pods, rather to release resources.
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Succeeded`, `Failed`, `Unknown`
- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `ImagePullBackOff`, `CreateContainerError`, `InvalidImageName`
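As a sketch of the above (the lifetime and the selected states are illustrative values), a `PodLifeTime` configuration restricted to specific states could look like:
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "PodLifeTime"
        args:
          # Evict pods older than one day...
          maxPodLifeTimeSeconds: 86400
          # ...but only those stuck in the following states.
          states:
            - "Pending"
            - "ContainerCreating"
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
```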
@@ -1011,12 +1072,16 @@ never evicted because these pods won't be recreated. (Standalone pods in failed
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have the same priority,
best-effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
* All types of pods with the `descheduler.alpha.kubernetes.io/evict` annotation are eligible for eviction. This
annotation is used to override checks that prevent eviction, and it lets users select which pods are evicted (see the example below).
Users should know whether and how the pod will be recreated.
The annotation only affects internal descheduler checks.
The anti-disruption protection provided by the [/eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/)
subresource is still respected.
* Pods with the `descheduler.alpha.kubernetes.io/prefer-no-eviction` annotation voice their preference not to be evicted.
Each plugin decides whether the annotation is respected. When the `DefaultEvictor` plugin sets `noEvictionPolicy`
to `Mandatory`, all such pods are excluded from eviction. Use this with caution, as some plugins may enforce
policies that are expected to always be met.
* Pods with a non-nil DeletionTimestamp are not evicted by default.
Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
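A minimal sketch of the two annotations described above on an ordinary pod (the pod name and image are placeholders, and the `"true"` values are illustrative):
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod
  annotations:
    # Opt this pod in to eviction even where internal descheduler checks
    # would normally protect it.
    descheduler.alpha.kubernetes.io/evict: "true"
    # Or ask the descheduler not to evict this pod; whether this is honored
    # depends on the plugin and on DefaultEvictor's noEvictionPolicy.
    # descheduler.alpha.kubernetes.io/prefer-no-eviction: "true"
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```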
@@ -1048,9 +1113,12 @@ To get best results from HA mode some additional configurations might require:
| name | type | description |
|---------------------------------------|--------------|-----------------------------------------------------------------------------------|
| build_info | gauge | constant 1 |
| pods_evicted | CounterVec | total number of pods evicted |
| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count) |
| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (support _bucket, _sum, _count) |
| pods_evicted | CounterVec | total number of pods evicted, is deprecated in version v0.34.0 |
| pods_evicted_total | CounterVec | total number of pods evicted |
| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count), is deprecated in version v0.34.0 |
| loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count) |
| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (support _bucket, _sum, _count), is deprecated in version v0.34.0 |
| strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (support _bucket, _sum, _count) |
The metrics are served through https://localhost:10258/metrics by default.
The address and port can be changed by setting `--binding-address` and `--secure-port` flags.
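For instance, a container spec in the descheduler Deployment could override the defaults with the flags mentioned above (a sketch; the policy path and the chosen bind address are assumptions):
```yaml
containers:
  - name: descheduler
    image: registry.k8s.io/descheduler/descheduler:v0.34.0
    args:
      - "--policy-config-file=/policy-dir/policy.yaml"
      - "--binding-address=0.0.0.0"   # serve metrics on all interfaces
      - "--secure-port=10258"         # default metrics/healthz port
```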
@@ -1066,6 +1134,7 @@ packages that it is compiled with.
| Descheduler | Supported Kubernetes Version |
|-------------|------------------------------|
| v0.34 | v1.34 |
| v0.33 | v1.33 |
| v0.32 | v1.32 |
| v0.31 | v1.31 |
@@ -1109,7 +1178,7 @@ that the only people who can get things done around here are the "maintainers".
We also would love to add more "official" maintainers, so show us what you can
do!
This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
This repository uses the Kubernetes bots. See a full list of the commands [here](https://go.k8s.io/bot-commands).
### Communicating With Contributors


@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.33.0
appVersion: 0.33.0
version: 0.34.0
appVersion: 0.34.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes


@@ -70,6 +70,10 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `cronJobAnnotations` | Annotations to add to the descheduler CronJob | `{}` |
| `cronJobLabels` | Labels to add to the descheduler CronJob | `{}` |
| `jobAnnotations` | Annotations to add to the descheduler Job resources (created by CronJob) | `{}` |
| `jobLabels` | Labels to add to the descheduler Job resources (created by CronJob) | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |


@@ -10,5 +10,5 @@ data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{ tpl (toYaml .Values.deschedulerPolicy) . | trim | indent 4 }}
{{- end }}


@@ -4,8 +4,15 @@ kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
{{- if .Values.cronJobAnnotations }}
annotations:
{{- .Values.cronJobAnnotations | toYaml | nindent 4 }}
{{- end }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.cronJobLabels }}
{{- .Values.cronJobLabels | toYaml | nindent 4 }}
{{- end }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
@@ -25,10 +32,24 @@ spec:
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
{{- if or .Values.jobAnnotations .Values.jobLabels }}
metadata:
{{- if .Values.jobAnnotations }}
annotations:
{{- .Values.jobAnnotations | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.jobLabels }}
labels:
{{- .Values.jobLabels | toYaml | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
{{- if .Values.activeDeadlineSeconds }}
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
@@ -67,6 +88,9 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -100,6 +124,9 @@ spec:
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 16 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
@@ -108,4 +135,7 @@ spec:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 10 }}
{{- end }}
{{- end }}


@@ -6,6 +6,9 @@ metadata:
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.deploymentAnnotations }}
annotations: {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
{{- end }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }}
@@ -39,6 +42,9 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
@@ -75,6 +81,9 @@ spec:
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
@@ -83,6 +92,9 @@ spec:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}


@@ -1,6 +1,9 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
{{- if kindIs "bool" .Values.serviceAccount.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- end }}
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}


@@ -0,0 +1,109 @@
suite: Test Descheduler CronJob and Job Annotations and Labels
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: CronJob
tests:
- it: adds cronJob and job annotations and labels when set
template: templates/cronjob.yaml
set:
cronJobAnnotations:
monitoring.company.com/scrape: "true"
description: "test cronjob"
cronJobLabels:
environment: "test"
team: "platform"
jobAnnotations:
sidecar.istio.io/inject: "false"
job.company.com/retry-limit: "3"
jobLabels:
job-type: "maintenance"
priority: "high"
asserts:
- equal:
path: metadata.annotations["monitoring.company.com/scrape"]
value: "true"
- equal:
path: metadata.annotations.description
value: "test cronjob"
- equal:
path: metadata.labels.environment
value: "test"
- equal:
path: metadata.labels.team
value: "platform"
- equal:
path: spec.jobTemplate.metadata.annotations["sidecar.istio.io/inject"]
value: "false"
- equal:
path: spec.jobTemplate.metadata.annotations["job.company.com/retry-limit"]
value: "3"
- equal:
path: spec.jobTemplate.metadata.labels.job-type
value: "maintenance"
- equal:
path: spec.jobTemplate.metadata.labels.priority
value: "high"
- it: does not add cronJob and job metadata when not set
template: templates/cronjob.yaml
asserts:
- isNull:
path: metadata.annotations
- isNotNull:
path: metadata.labels
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: descheduler
- isNull:
path: spec.jobTemplate.metadata
- it: does not add job metadata when job annotations and labels are empty
template: templates/cronjob.yaml
set:
jobAnnotations: {}
jobLabels: {}
asserts:
- isNull:
path: spec.jobTemplate.metadata
- it: works with all annotation and label types together
template: templates/cronjob.yaml
set:
cronJobAnnotations:
cron-annotation: "cron-value"
cronJobLabels:
cron-label: "cron-value"
jobAnnotations:
job-annotation: "job-value"
jobLabels:
job-label: "job-value"
podAnnotations:
pod-annotation: "pod-value"
podLabels:
pod-label: "pod-value"
asserts:
- equal:
path: metadata.annotations.cron-annotation
value: "cron-value"
- equal:
path: metadata.labels.cron-label
value: "cron-value"
- equal:
path: spec.jobTemplate.metadata.annotations.job-annotation
value: "job-value"
- equal:
path: spec.jobTemplate.metadata.labels.job-label
value: "job-value"
- equal:
path: spec.jobTemplate.spec.template.metadata.annotations.pod-annotation
value: "pod-value"
- equal:
path: spec.jobTemplate.spec.template.metadata.labels.pod-label
value: "pod-value"


@@ -55,7 +55,8 @@ suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# ttlSecondsAfterFinished: 600
# activeDeadlineSeconds: 60 # Make sure this value is SHORTER than the cron interval.
# timeZone: Etc/UTC
# Required when running as a Deployment
@@ -107,8 +108,11 @@ deschedulerPolicy:
pluginConfig:
- name: DefaultEvictor
args:
ignorePvcPods: true
evictLocalStoragePods: true
podProtections:
defaultDisabled:
- "PodsWithLocalStorage"
extraEnabled:
- "PodsWithPVC"
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
@@ -193,6 +197,25 @@ serviceAccount:
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
# Opt out of API credential automounting
#
# automountServiceAccountToken Default is not set
# automountServiceAccountToken: true
# Mount the ServiceAccountToken in the Pod of a CronJob or Deployment
# Default is not set - but only implied by the ServiceAccount
# automountServiceAccountToken: true
# Annotations that'll be applied to deployment
deploymentAnnotations: {}
cronJobAnnotations: {}
cronJobLabels: {}
jobAnnotations: {}
jobLabels: {}
podAnnotations: {}
@@ -206,8 +229,9 @@ livenessProbe:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
initialDelaySeconds: 5
periodSeconds: 20
timeoutSeconds: 5
service:
enabled: false
@@ -244,3 +268,30 @@ serviceMonitor:
# targetLabel: nodename
# replacement: $1
# action: replace
## Additional Volume mounts when automountServiceAccountToken is false
# extraServiceAccountVolumeMounts:
# - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
# name: kube-api-access
# readOnly: true
## Additional Volumes when automountServiceAccountToken is false
# extraServiceAccountVolumes:
# - name: kube-api-access
# projected:
# defaultMode: 0444
# sources:
# - configMap:
# items:
# - key: ca.crt
# path: ca.crt
# name: kube-root-ca.crt
# - downwardAPI:
# items:
# - fieldRef:
# apiVersion: v1
# fieldPath: metadata.namespace
# path: namespace
# - serviceAccountToken:
# expirationSeconds: 3600
# path: token


@@ -144,8 +144,10 @@ func (rs *DeschedulerServer) Apply() error {
return err
}
secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing
if secureServing != nil {
secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing
}
return nil
}


@@ -22,6 +22,7 @@ import (
"io"
"os/signal"
"syscall"
"time"
"github.com/spf13/cobra"
@@ -97,17 +98,28 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
var stoppedCh <-chan struct{}
var err error
if rs.SecureServingInfo != nil {
stoppedCh, _, err = rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
}
}
err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
klog.ErrorS(err, "failed to create tracer provider")
}
defer tracing.Shutdown(ctx)
defer func() {
// we give the tracing.Shutdown() its own context as the
// original context may have been cancelled already. we
// have arbitrarily chosen the timeout duration.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
tracing.Shutdown(ctx)
}()
// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
@@ -118,8 +130,10 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
}
done()
// wait for metrics server to close
<-stoppedCh
if stoppedCh != nil {
// wait for metrics server to close
<-stoppedCh
}
return nil
}


@@ -4,6 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the
Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.34.0 | registry.k8s.io/descheduler/descheduler:v0.34.0 | AMD64<br>ARM64<br>ARMv7 |
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |

go.mod (113 lines changed)

@@ -1,57 +1,60 @@
module sigs.k8s.io/descheduler
go 1.24.2
go 1.24.0
toolchain go1.24.3
godebug default=go1.24
require (
github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.7.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.62.0
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
go.opentelemetry.io/otel v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
go.opentelemetry.io/otel/sdk v1.33.0
go.opentelemetry.io/otel/trace v1.33.0
google.golang.org/grpc v1.68.1
k8s.io/api v0.33.0
k8s.io/apimachinery v0.33.0
k8s.io/apiserver v0.33.0
k8s.io/client-go v0.33.0
k8s.io/code-generator v0.33.0
k8s.io/component-base v0.33.0
k8s.io/component-helpers v0.33.0
github.com/prometheus/common v0.64.0
github.com/spf13/cobra v1.10.0
github.com/spf13/pflag v1.0.9
go.opentelemetry.io/otel v1.36.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
go.opentelemetry.io/otel/sdk v1.36.0
go.opentelemetry.io/otel/trace v1.36.0
google.golang.org/grpc v1.72.2
k8s.io/api v0.34.0
k8s.io/apimachinery v0.34.0
k8s.io/apiserver v0.34.0
k8s.io/client-go v0.34.0
k8s.io/code-generator v0.34.0
k8s.io/component-base v0.34.0
k8s.io/component-helpers v0.34.0
k8s.io/klog/v2 v2.130.1
k8s.io/metrics v0.33.0
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
k8s.io/metrics v0.34.0
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
kubevirt.io/api v1.3.0
kubevirt.io/client-go v1.3.0
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
sigs.k8s.io/mdtoc v1.1.0
sigs.k8s.io/yaml v1.4.0
sigs.k8s.io/yaml v1.6.0
)
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
require (
cel.dev/expr v0.19.1 // indirect
cel.dev/expr v0.24.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-kit/kit v0.13.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
@@ -66,13 +69,12 @@ require (
github.com/golang/protobuf v1.5.4 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.23.2 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
@@ -81,53 +83,56 @@ require (
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.21 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
go.etcd.io/etcd/client/v3 v3.5.21 // indirect
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
google.golang.org/protobuf v1.36.5 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.30.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
k8s.io/kms v0.33.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
k8s.io/kms v0.34.0 // indirect
k8s.io/kube-openapi v0.30.0 // indirect
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b

go.sum (282 lines changed)

@@ -1,23 +1,20 @@
cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4=
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -36,8 +33,8 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -49,9 +46,9 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -59,10 +56,10 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
@@ -108,8 +105,8 @@ github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/K
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
@@ -132,11 +129,10 @@ github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -149,10 +145,9 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -163,20 +158,21 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@@ -212,9 +208,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
@@ -246,6 +242,7 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
@@ -274,6 +271,7 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -282,12 +280,14 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -297,10 +297,12 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/cobra v1.10.0 h1:a5/WeUlSDCvV5a45ljW2ZFtV0bTDpkfSAj3uqB6Sc+0=
github.com/spf13/cobra v1.10.0/go.mod h1:9dhySC7dnTtEiqzmqfkLj47BslqLCUPMXjG2lj/NgoE=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.8/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
@@ -317,6 +319,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
@@ -330,48 +333,53 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -387,8 +395,10 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -410,6 +420,7 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -450,11 +461,13 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -468,8 +481,9 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -513,9 +527,12 @@ golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -534,8 +551,10 @@ golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -551,8 +570,10 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -580,6 +601,9 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -592,17 +616,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -614,11 +636,11 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -641,53 +663,50 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg=
k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ=
k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/code-generator v0.33.0 h1:B212FVl6EFqNmlgdOZYWNi77yBv+ed3QgQsMR8YQCw4=
k8s.io/code-generator v0.33.0/go.mod h1:KnJRokGxjvbBQkSJkbVuBbu6z4B0rC7ynkpY5Aw6m9o=
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees=
k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0=
k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8=
k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg=
k8s.io/component-helpers v0.34.0 h1:5T7P9XGMoUy1JDNKzHf0p/upYbeUf8ZaSf9jbx0QlIo=
k8s.io/component-helpers v0.34.0/go.mod h1:kaOyl5tdtnymriYcVZg4uwDBe2d1wlIpXyDkt6sVnt4=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.33.0 h1:fhQSW/vyaWDhMp0vDuO/sLg2RlGZf4F77beSXcB4/eE=
k8s.io/kms v0.33.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
k8s.io/metrics v0.33.0 h1:sKe5sC9qb1RakMhs8LWYNuN2ne6OTCWexj8Jos3rO2Y=
k8s.io/metrics v0.33.0/go.mod h1:XewckTFXmE2AJiP7PT3EXaY7hi7bler3t2ZLyOdQYzU=
k8s.io/kms v0.34.0 h1:u+/rcxQ3Jr7gC9AY5nXuEnBcGEB7ZOIJ9cdLdyHyEjQ=
k8s.io/kms v0.34.0/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
k8s.io/metrics v0.34.0 h1:nYSfG2+tnL6/MRC2I+sGHjtNEGoEoM/KktgGOoQFwws=
k8s.io/metrics v0.34.0/go.mod h1:KCuXmotE0v4AvoARKUP8NC4lUnbK/Du1mluGdor5h4M=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
@@ -700,18 +719,19 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUo
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/structured-merge-diff/v6 v6.2.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=


@@ -35,6 +35,9 @@ rules:
- apiGroups: ["metrics.k8s.io"]
resources: ["nodes", "pods"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "watch", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
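The added apiGroups/resources/verbs entry grants the descheduler read-only access to PersistentVolumeClaims, matching the persistentvolumeclaims informer the defaultevictor plugin relies on later in this diff. A minimal sketch, assuming the standard client-go lister API, of how a plugin could read a claim's storage class from the informer cache with only get/watch/list permissions (the pvcStorageClass helper and package name below are illustrative, not part of this change):

package example

import (
	"k8s.io/client-go/informers"
)

// pvcStorageClass is a hypothetical helper: it reads a PVC from the shared
// informer cache (populated via watch/list, hence the verbs above) and returns
// the storage class the claim requests, or "" when none is set.
func pvcStorageClass(factory informers.SharedInformerFactory, namespace, name string) (string, error) {
	pvc, err := factory.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(namespace).Get(name)
	if err != nil {
		return "", err
	}
	if pvc.Spec.StorageClassName != nil {
		return *pvc.Spec.StorageClassName, nil
	}
	return "", nil
}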


@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0
image: registry.k8s.io/descheduler/descheduler:v0.34.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0
image: registry.k8s.io/descheduler/descheduler:v0.34.0
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"


@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0
image: registry.k8s.io/descheduler/descheduler:v0.34.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -31,10 +31,18 @@ const (
var (
PodsEvicted = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
}, []string{"result", "strategy", "profile", "namespace", "node"})
PodsEvictedTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
Name: "pods_evicted_total",
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "profile", "namespace", "node"})
@@ -49,18 +57,36 @@ var (
)
DeschedulerLoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})
LoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_loop_duration_seconds",
Name: "loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})
DeschedulerStrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_strategy_duration_seconds",
Help: "Time taken to complete Each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
}, []string{"strategy", "profile"})
StrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_strategy_duration_seconds",
Name: "strategy_duration_seconds",
Help: "Time taken to complete Each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
@@ -68,9 +94,12 @@ var (
metricsList = []metrics.Registerable{
PodsEvicted,
PodsEvictedTotal,
buildInfo,
DeschedulerLoopDuration,
DeschedulerStrategyDuration,
LoopDuration,
StrategyDuration,
}
)
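The metric changes above follow the component-base deprecation pattern: the original vectors stay registered but now carry DeprecatedVersion: "0.34.0", while new vectors with the cleaned-up names (pods_evicted_total, loop_duration_seconds, strategy_duration_seconds, dropping the apparently duplicated descheduler_ prefix on the histograms) are added alongside them, so scrapes of the old series keep working during the transition before the deprecated names are hidden in a later release. A standalone sketch of the same pattern with illustrative names, labels, and subsystem (not the project's actual wiring):

package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var (
	// Deprecated series: still registered and scrapable, but component-base marks
	// it as deprecated at 0.34.0 and will hide it in a later release.
	oldPodsEvicted = metrics.NewCounterVec(&metrics.CounterOpts{
		Subsystem:         "descheduler",
		Name:              "pods_evicted",
		Help:              "Deprecated duplicate of pods_evicted_total.",
		StabilityLevel:    metrics.ALPHA,
		DeprecatedVersion: "0.34.0",
	}, []string{"result"})
	// Replacement series under the new name.
	podsEvictedTotal = metrics.NewCounterVec(&metrics.CounterOpts{
		Subsystem:      "descheduler",
		Name:           "pods_evicted_total",
		Help:           "Number of evicted pods.",
		StabilityLevel: metrics.ALPHA,
	}, []string{"result"})
)

func main() {
	legacyregistry.MustRegister(oldPodsEvicted, podsEvictedTotal)
	// Updating both keeps dashboards built on either name populated while the
	// deprecated series is still served.
	oldPodsEvicted.With(map[string]string{"result": "success"}).Inc()
	podsEvictedTotal.With(map[string]string{"result": "success"}).Inc()
}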


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@@ -74,6 +74,7 @@ import (
const (
prometheusAuthTokenSecretKey = "prometheusAuthToken"
workQueueKey = "key"
indexerNodeSelectorGlobal = "indexer_node_selector_global"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
@@ -164,7 +165,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), // Used by the defaultevictor plugin
) // Used by the defaultevictor plugin
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
@@ -206,15 +207,20 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
}
if rs.MetricsClient != nil {
nodeSelector := labels.Everything()
if deschedulerPolicy.NodeSelector != nil {
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
if err != nil {
return nil, err
}
nodeSelector = sel
nodeSelector := labels.Everything()
if deschedulerPolicy.NodeSelector != nil {
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
if err != nil {
return nil, err
}
nodeSelector = sel
}
if err := nodeutil.AddNodeSelectorIndexer(sharedInformerFactory.Core().V1().Nodes().Informer(), indexerNodeSelectorGlobal, nodeSelector); err != nil {
return nil, err
}
if rs.MetricsClient != nil {
desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
}
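With this hunk the global node selector is materialized as an informer indexer (indexer_node_selector_global) instead of being applied on every list call, so later consumers such as runProfiles can pull the matching nodes straight from the informer cache. The helper's implementation is not part of this diff; a plausible sketch, assuming it simply wraps AddIndexers and keys every matching node under the indexer name (consistent with the later ByIndex(indexerNodeSelectorGlobal, indexerNodeSelectorGlobal) lookup):

package node // illustrative placement; the real AddNodeSelectorIndexer lives in the descheduler's node utilities

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// AddNodeSelectorIndexer (sketch) registers an index that maps every node
// matching the selector to a single key equal to the indexer name. Looking up
// ByIndex(name, name) then returns exactly the nodes the selector matches,
// without any extra API traffic.
func AddNodeSelectorIndexer(informer cache.SharedIndexInformer, name string, selector labels.Selector) error {
	return informer.AddIndexers(cache.Indexers{
		name: func(obj interface{}) ([]string, error) {
			node, ok := obj.(*v1.Node)
			if !ok || !selector.Matches(labels.Set(node.Labels)) {
				return nil, nil // objects that do not match simply get no entry under this index
			}
			return []string{name}, nil
		},
	})
}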
@@ -345,20 +351,15 @@ func (d *descheduler) eventHandler() cache.ResourceEventHandler {
}
}
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
func (d *descheduler) runDeschedulerLoop(ctx context.Context) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
defer span.End()
defer func(loopStartDuration time.Time) {
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
metrics.LoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
}(time.Now())
// if len is still <= 1 error out
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return fmt.Errorf("the cluster size is 0 or 1")
}
var client clientset.Interface
// When dry run mode is enabled, collect all the relevant objects (mostly pods) under a fake client.
// So evicting pods while running multiple strategies in a row has a cumulative effect
@@ -383,6 +384,22 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
nodeSelector := labels.Everything()
if d.deschedulerPolicy.NodeSelector != nil {
sel, err := labels.Parse(*d.deschedulerPolicy.NodeSelector)
if err != nil {
return err
}
nodeSelector = sel
}
// TODO(ingvagabund): copy paste all relevant indexers from the real client to the fake one
// TODO(ingvagabund): register one indexer per profile. Fall back to the global node selector when no profile-level node selector is specified.
// Also, keep a cache of node label selectors to detect duplicates to avoid creating an extra informer.
if err := nodeutil.AddNodeSelectorIndexer(fakeSharedInformerFactory.Core().V1().Nodes().Informer(), indexerNodeSelectorGlobal, nodeSelector); err != nil {
return err
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
@@ -398,7 +415,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
d.podEvictor.SetClient(client)
d.podEvictor.ResetCounters()
d.runProfiles(ctx, client, nodes)
d.runProfiles(ctx, client)
klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())
@@ -408,13 +425,35 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface) {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()
nodesAsInterface, err := d.sharedInformerFactory.Core().V1().Nodes().Informer().GetIndexer().ByIndex(indexerNodeSelectorGlobal, indexerNodeSelectorGlobal)
if err != nil {
span.AddEvent("Failed to list nodes with global node selector", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
return
}
nodes, err := nodeutil.ReadyNodesFromInterfaces(nodesAsInterface)
if err != nil {
span.AddEvent("Failed to convert node as interfaces into ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
return
}
// fewer than two ready nodes: evicting pods would only cause disruption, so skip this cycle
if len(nodes) <= 1 {
klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
return // gracefully skip this cycle instead of aborting
}
var profileRunners []profileRunner
for _, profile := range d.deschedulerPolicy.Profiles {
for idx, profile := range d.deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
ctx,
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
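runProfiles now resolves its node list each cycle from the index registered above and narrows it to ready nodes via nodeutil.ReadyNodesFromInterfaces, whose implementation is not shown in this diff. A rough sketch of what that conversion could look like, assuming a plain type assertion plus a NodeReady condition check (the isNodeReady helper and package placement below are illustrative):

package node // illustrative placement

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// ReadyNodesFromInterfaces (sketch): the indexer returns []interface{}, so each
// value is asserted back to *v1.Node and anything not reporting Ready is dropped.
func ReadyNodesFromInterfaces(objs []interface{}) ([]*v1.Node, error) {
	nodes := make([]*v1.Node, 0, len(objs))
	for _, obj := range objs {
		node, ok := obj.(*v1.Node)
		if !ok {
			return nil, fmt.Errorf("unexpected object of type %T in node index", obj)
		}
		if isNodeReady(node) {
			nodes = append(nodes, node)
		}
	}
	return nodes, nil
}

func isNodeReady(node *v1.Node) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}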
@@ -423,6 +462,9 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
frameworkprofile.WithMetricsCollector(d.metricsCollector),
frameworkprofile.WithPrometheusClient(d.prometheusClient),
// Generate a unique instance ID using just the index to avoid long IDs
// when profile names are very long
frameworkprofile.WithProfileInstanceID(fmt.Sprintf("%d", idx)),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -585,11 +627,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector
}
var eventClient clientset.Interface
if rs.DryRun {
eventClient = fakeclientset.NewSimpleClientset()
@@ -664,14 +701,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
if err != nil {
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}
err = descheduler.runDeschedulerLoop(sCtx, nodes)
err = descheduler.runDeschedulerLoop(sCtx)
if err != nil {
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)


@@ -177,7 +177,7 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
}
}
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, dryRun bool, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
client := fakeclientset.NewSimpleClientset(objects...)
eventClient := fakeclientset.NewSimpleClientset(objects...)
@@ -189,6 +189,7 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
rs.EventClient = eventClient
rs.DefaultFeatureGates = featureGates
rs.MetricsClient = metricsClient
rs.DryRun = dryRun
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
@@ -477,70 +478,72 @@ func taintNodeNoSchedule(node *v1.Node) {
func TestPodEvictorReset(t *testing.T) {
initPluginRegistry()
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
tests := []struct {
name string
dryRun bool
cycles []struct {
expectedTotalEvicted uint
expectedRealEvictions int
expectedFakeEvictions int
}
}{
{
name: "real mode",
dryRun: false,
cycles: []struct {
expectedTotalEvicted uint
expectedRealEvictions int
expectedFakeEvictions int
}{
{expectedTotalEvicted: 2, expectedRealEvictions: 2, expectedFakeEvictions: 0},
{expectedTotalEvicted: 2, expectedRealEvictions: 4, expectedFakeEvictions: 0},
},
},
{
name: "dry mode",
dryRun: true,
cycles: []struct {
expectedTotalEvicted uint
expectedRealEvictions int
expectedFakeEvictions int
}{
{expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 2},
{expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 4},
},
},
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx)
rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
defer cancel()
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, test.SetRSOwnerRef)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, test.SetRSOwnerRef)
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx)
_, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, tc.dryRun, node1, node2, p1, p2)
defer cancel()
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
// a single pod eviction expected
klog.Infof("2 pod eviction expected per a descheduling cycle, 2 real evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
}
// a single pod eviction expected
klog.Infof("2 pod eviction expected per a descheduling cycle, 4 real evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
// check the fake client syncing and the right pods evicted
klog.Infof("Enabling the dry run mode")
rs.DryRun = true
evictedPods = []string{}
klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
for i, cycle := range tc.cycles {
if err := descheduler.runDeschedulerLoop(ctx); err != nil {
t.Fatalf("Cycle %d: Unable to run a descheduling loop: %v", i+1, err)
}
if descheduler.podEvictor.TotalEvicted() != cycle.expectedTotalEvicted || len(evictedPods) != cycle.expectedRealEvictions || len(fakeEvictedPods) != cycle.expectedFakeEvictions {
t.Fatalf("Cycle %d: Expected (%v,%v,%v) pods evicted, got (%v,%v,%v) instead", i+1, cycle.expectedTotalEvicted, cycle.expectedRealEvictions, cycle.expectedFakeEvictions, descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
}
})
}
}
@@ -555,7 +558,7 @@ func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, to
}
func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
err := descheduler.runDeschedulerLoop(ctx, nodes)
err := descheduler.runDeschedulerLoop(ctx)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
@@ -595,7 +598,7 @@ func TestEvictionRequestsCache(t *testing.T) {
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, false, node1, node2, p1, p2, p3, p4)
defer cancel()
var fakeEvictedPods []string
@@ -731,13 +734,12 @@ func TestDeschedulingLimits(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ctxCancel, cancel := context.WithCancel(ctx)
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, false, node1, node2)
defer cancel()
var fakeEvictedPods []string
@@ -774,7 +776,7 @@ func TestDeschedulingLimits(t *testing.T) {
time.Sleep(100 * time.Millisecond)
klog.Infof("2 evictions in background expected, 2 normal evictions")
err := descheduler.runDeschedulerLoop(ctx, nodes)
err := descheduler.runDeschedulerLoop(ctx)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
@@ -790,6 +792,219 @@ func TestDeschedulingLimits(t *testing.T) {
}
}
func TestNodeLabelSelectorBasedEviction(t *testing.T) {
initPluginRegistry()
// createNodes creates 4 nodes with different labels and applies a taint to all of them
createNodes := func() (*v1.Node, *v1.Node, *v1.Node, *v1.Node) {
taint := []v1.Taint{
{
Key: "test-taint",
Value: "test-value",
Effect: v1.TaintEffectNoSchedule,
},
}
node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
node.Labels = map[string]string{
"zone": "us-east-1a",
"node-type": "compute",
"environment": "production",
}
node.Spec.Taints = taint
})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.Labels = map[string]string{
"zone": "us-east-1b",
"node-type": "compute",
"environment": "production",
}
node.Spec.Taints = taint
})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.Labels = map[string]string{
"zone": "us-west-1a",
"node-type": "storage",
"environment": "staging",
}
node.Spec.Taints = taint
})
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
node.Labels = map[string]string{
"zone": "us-west-1b",
"node-type": "storage",
"environment": "staging",
}
node.Spec.Taints = taint
})
return node1, node2, node3, node4
}
tests := []struct {
description string
nodeSelector string
dryRun bool
expectedEvictedFromNodes []string
}{
{
description: "Evict from n1, n2",
nodeSelector: "environment=production",
dryRun: false,
expectedEvictedFromNodes: []string{"n1", "n2"},
},
{
description: "Evict from n1, n2 in dry run mode",
nodeSelector: "environment=production",
dryRun: true,
expectedEvictedFromNodes: []string{"n1", "n2"},
},
{
description: "Evict from n3, n4",
nodeSelector: "environment=staging",
dryRun: false,
expectedEvictedFromNodes: []string{"n3", "n4"},
},
{
description: "Evict from n3, n4 in dry run mode",
nodeSelector: "environment=staging",
dryRun: true,
expectedEvictedFromNodes: []string{"n3", "n4"},
},
{
description: "Evict from n1, n4",
nodeSelector: "zone in (us-east-1a, us-west-1b)",
dryRun: false,
expectedEvictedFromNodes: []string{"n1", "n4"},
},
{
description: "Evict from n1, n4 in dry run mode",
nodeSelector: "zone in (us-east-1a, us-west-1b)",
dryRun: true,
expectedEvictedFromNodes: []string{"n1", "n4"},
},
{
description: "Evict from n2, n3",
nodeSelector: "zone in (us-east-1b, us-west-1a)",
dryRun: false,
expectedEvictedFromNodes: []string{"n2", "n3"},
},
{
description: "Evict from n2, n3 in dry run mode",
nodeSelector: "zone in (us-east-1b, us-west-1a)",
dryRun: true,
expectedEvictedFromNodes: []string{"n2", "n3"},
},
{
description: "Evict from all nodes",
nodeSelector: "",
dryRun: false,
expectedEvictedFromNodes: []string{"n1", "n2", "n3", "n4"},
},
{
description: "Evict from all nodes in dry run mode",
nodeSelector: "",
dryRun: true,
expectedEvictedFromNodes: []string{"n1", "n2", "n3", "n4"},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx := context.Background()
// Create nodes with different labels and taints
node1, node2, node3, node4 := createNodes()
ownerRef := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = ownerRef
pod.Status.Phase = v1.PodRunning
}
// Create one pod per node
p1 := test.BuildTestPod("p1", 200, 0, node1.Name, updatePod)
p2 := test.BuildTestPod("p2", 200, 0, node2.Name, updatePod)
p3 := test.BuildTestPod("p3", 200, 0, node3.Name, updatePod)
p4 := test.BuildTestPod("p4", 200, 0, node4.Name, updatePod)
// Map pod names to their node names for validation
podToNode := map[string]string{
"p1": "n1",
"p2": "n2",
"p3": "n3",
"p4": "n4",
}
policy := removePodsViolatingNodeTaintsPolicy()
if tc.nodeSelector != "" {
policy.NodeSelector = &tc.nodeSelector
}
ctxCancel, cancel := context.WithCancel(ctx)
_, deschedulerInstance, client := initDescheduler(t, ctxCancel, initFeatureGates(), policy, nil, tc.dryRun, node1, node2, node3, node4, p1, p2, p3, p4)
defer cancel()
// Verify all pods are created initially
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if len(pods.Items) != 4 {
t.Errorf("Expected 4 pods initially, got %d", len(pods.Items))
}
var evictedPods []string
if !tc.dryRun {
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
} else {
deschedulerInstance.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&evictedPods, nil, nil)
}
}
// Run descheduler
if err := deschedulerInstance.runDeschedulerLoop(ctx); err != nil {
t.Fatalf("Unable to run descheduler loop: %v", err)
}
// Collect which nodes had pods evicted from them
nodesWithEvictedPods := make(map[string]bool)
for _, podName := range evictedPods {
if nodeName, ok := podToNode[podName]; ok {
nodesWithEvictedPods[nodeName] = true
}
}
// Verify the correct number of nodes had pods evicted
if len(nodesWithEvictedPods) != len(tc.expectedEvictedFromNodes) {
t.Errorf("Expected pods to be evicted from %d nodes, got %d nodes: %v", len(tc.expectedEvictedFromNodes), len(nodesWithEvictedPods), nodesWithEvictedPods)
}
// Verify pods were evicted from the correct nodes
for _, nodeName := range tc.expectedEvictedFromNodes {
if !nodesWithEvictedPods[nodeName] {
t.Errorf("Expected pod to be evicted from node %s, but it was not", nodeName)
}
}
// Verify no unexpected nodes had pods evicted
for nodeName := range nodesWithEvictedPods {
found := false
for _, expectedNode := range tc.expectedEvictedFromNodes {
if nodeName == expectedNode {
found = true
break
}
}
if !found {
t.Errorf("Unexpected eviction from node %s", nodeName)
}
}
t.Logf("Successfully evicted pods from nodes: %v", tc.expectedEvictedFromNodes)
})
}
}
func TestLoadAwareDescheduling(t *testing.T) {
initPluginRegistry()
@@ -801,7 +1016,6 @@ func TestLoadAwareDescheduling(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
@@ -850,6 +1064,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
initFeatureGates(),
policy,
metricsClientset,
false,
node1, node2, p1, p2, p3, p4, p5)
defer cancel()
@@ -857,7 +1072,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
// after newDescheduler in RunDeschedulerStrategies.
descheduler.metricsCollector.Collect(ctx)
err := descheduler.runDeschedulerLoop(ctx, nodes)
err := descheduler.runDeschedulerLoop(ctx)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}

View File

@@ -42,6 +42,12 @@ import (
"sigs.k8s.io/descheduler/pkg/tracing"
)
const (
deschedulerGlobalName = "sigs.k8s.io/descheduler"
reasonAnnotationKey = "reason"
requestedByAnnotationKey = "requested-by"
)
var (
assumedEvictionRequestTimeoutSeconds uint = 10 * 60 // 10 minutes
evictionRequestsCacheResyncPeriod time.Duration = 10 * time.Minute
@@ -482,6 +488,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := NewEvictionTotalLimitError()
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
@@ -496,6 +503,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
@@ -510,6 +518,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
err := NewEvictionNamespaceLimitError(pod.Namespace)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
@@ -519,13 +528,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
return err
}
ignore, err := pe.evictPod(ctx, pod)
ignore, err := pe.evictPod(ctx, pod, opts)
if err != nil {
// err is used only for logging purposes
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
@@ -545,6 +555,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
metrics.PodsEvictedTotal.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
if pe.dryRun {
@@ -564,7 +575,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
}
// return (ignore, err)
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) (bool, error) {
deleteOptions := &metav1.DeleteOptions{
GracePeriodSeconds: pe.gracePeriodSeconds,
}
@@ -577,6 +588,10 @@ func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{
"reason": fmt.Sprintf("triggered by %v/%v: %v", opts.ProfileName, opts.StrategyName, opts.Reason),
"requested-by": deschedulerGlobalName,
},
},
DeleteOptions: deleteOptions,
}
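Purely as an illustration (not part of this diff), a sketch of how a consumer such as an admission webhook could read the metadata the descheduler now attaches to Eviction objects; the annotation keys mirror the constants above, while the helper name and the policyv1 alias ("k8s.io/api/policy/v1") are assumptions:
// deschedulerEvictionReason returns the descheduler-provided reason, and whether
// the Eviction was requested by the descheduler at all.
func deschedulerEvictionReason(ev *policyv1.Eviction) (string, bool) {
	if ev.Annotations["requested-by"] != "sigs.k8s.io/descheduler" {
		return "", false
	}
	return ev.Annotations["reason"], true
}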

View File

@@ -20,6 +20,7 @@ import (
"context"
"fmt"
"reflect"
"strings"
"testing"
"time"
@@ -114,7 +115,7 @@ func TestEvictPod(t *testing.T) {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}
_, got := podEvictor.evictPod(ctx, test.evictedPod)
_, got := podEvictor.evictPod(ctx, test.evictedPod, EvictOptions{})
if got != test.wantErr {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
}
@@ -418,7 +419,11 @@ func TestEvictionRequestsCacheCleanup(t *testing.T) {
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
podName := eviction.GetName()
if podName == "p1" || podName == "p2" {
annotations := eviction.GetAnnotations()
if (podName == "p1" || podName == "p2") && annotations[requestedByAnnotationKey] == deschedulerGlobalName && strings.HasPrefix(
annotations[reasonAnnotationKey],
"triggered by",
) {
return true, nil, &apierrors.StatusError{
ErrStatus: metav1.Status{
Reason: metav1.StatusReasonTooManyRequests,

View File

@@ -17,12 +17,17 @@ limitations under the License.
package utils
import (
corev1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
)
const (
EvictionKind = "Eviction"
EvictionSubresource = "pods/eviction"
// A new experimental feature for expressing a soft no-eviction preference.
// Each plugin decides whether the soft preference is respected.
// If configured, the soft preference turns into a mandatory no-eviction policy for the DefaultEvictor plugin.
SoftNoEvictionAnnotationKey = "descheduler.alpha.kubernetes.io/prefer-no-eviction"
)
// SupportEviction uses Discovery API to find out if the server support eviction subresource
@@ -56,3 +61,9 @@ func SupportEviction(client clientset.Interface) (string, error) {
}
return "", nil
}
// HaveNoEvictionAnnotation checks whether the pod has the soft no-eviction annotation
func HaveNoEvictionAnnotation(pod *corev1.Pod) bool {
_, found := pod.ObjectMeta.Annotations[SoftNoEvictionAnnotationKey]
return found
}
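A minimal sketch, assuming only the SoftNoEvictionAnnotationKey constant above (the helper name is illustrative), of how a workload owner could express the soft no-eviction preference on a pod:
func markPreferNoEviction(pod *corev1.Pod) {
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	// Only the key's presence matters; HaveNoEvictionAnnotation ignores the value.
	pod.Annotations[SoftNoEvictionAnnotationKey] = ""
}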

View File

@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
@@ -78,6 +79,22 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister list
return readyNodes, nil
}
// ReadyNodesFromInterfaces converts a list of interface{} items to ready nodes.
// Each interface{} item is expected to be a *v1.Node. Only ready nodes are returned.
func ReadyNodesFromInterfaces(nodeInterfaces []interface{}) ([]*v1.Node, error) {
readyNodes := make([]*v1.Node, 0, len(nodeInterfaces))
for i, nodeInterface := range nodeInterfaces {
node, ok := nodeInterface.(*v1.Node)
if !ok {
return nil, fmt.Errorf("item at index %d is not a *v1.Node", i)
}
if IsReady(node) {
readyNodes = append(readyNodes, node)
}
}
return readyNodes, nil
}
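For context, a sketch of how this helper can be fed from an informer index; the indexerName is assumed to have been registered beforehand (for example via AddNodeSelectorIndexer below) and the function name is illustrative:
func readyNodesFromIndex(nodeInformer cache.SharedIndexInformer, indexerName string) ([]*v1.Node, error) {
	// ByIndex returns the cached objects stored under the index key; the helper
	// above then keeps only the ready *v1.Node items.
	objs, err := nodeInformer.GetIndexer().ByIndex(indexerName, indexerName)
	if err != nil {
		return nil, err
	}
	return ReadyNodesFromInterfaces(objs)
}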
// IsReady checks if the descheduler could run against given node.
func IsReady(node *v1.Node) bool {
for i := range node.Status.Conditions {
@@ -105,20 +122,29 @@ func IsReady(node *v1.Node) bool {
return true
}
// NodeFit returns true if the provided pod can be scheduled onto the provided node.
// NodeFit returns nil if the provided pod can be scheduled onto the provided node.
// Otherwise, it returns an error explaining why the node does not fit the pod.
//
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
// deciding if a pod would fit on a node, but more predicates may be added in the future.
// There should be no methods to modify nodes or pods in this method.
// It considers a subset of the Kubernetes Scheduler's predicates
// when deciding if a pod would fit on a node. More predicates may be added in the future.
//
// The checks are ordered from fastest to slowest to reduce unnecessary computation,
// especially for nodes that are clearly unsuitable early in the evaluation process.
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
// Check node selector and required affinity
// Check if the node is marked as unschedulable.
if IsNodeUnschedulable(node) {
return errors.New("node is not schedulable")
}
// Check if the pod matches the node's label selector (nodeSelector) and required node affinity rules.
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
return err
} else if !ok {
return errors.New("pod node selector does not match the node label")
}
// Check taints (we only care about NoSchedule and NoExecute taints)
// Check taints on the node that have effect NoSchedule or NoExecute.
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
})
@@ -126,25 +152,21 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
return errors.New("pod does not tolerate taints on the node")
}
// Check if the pod can fit on a node based off it's requests
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
return reqError
}
}
// Check if node is schedulable
if IsNodeUnschedulable(node) {
return errors.New("node is not schedulable")
}
// Check if pod matches inter-pod anti-affinity rule of pod on node
// Check if the pod violates any inter-pod anti-affinity rules with existing pods on the node.
// This involves iterating over all pods assigned to the node and evaluating label selectors.
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
return err
} else if match {
return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
}
// Check whether the node has enough available resources to accommodate the pod.
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
return reqError
}
}
return nil
}
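To make the check order above concrete, a sketch (the function name and iteration are illustrative, not the descheduler's actual helper) of using NodeFit to verify a pod could land on some other node before evicting it:
func fitsOnAnotherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
	for _, node := range nodes {
		if node.Name == pod.Spec.NodeName {
			continue // only interested in nodes the pod could move to
		}
		if err := NodeFit(nodeIndexer, pod, node); err == nil {
			return true
		}
	}
	return false
}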
@@ -236,7 +258,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
return false, fmt.Errorf("insufficient %v", resource)
}
}
// check pod num, at least one pod number is avaibalbe
// check pod num, at least one pod number is available
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
}
@@ -395,3 +417,22 @@ func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFun
return false, nil
}
// AddNodeSelectorIndexer registers an indexer on the given node informer that indexes
// nodes matching the provided node selector under the given indexer name.
func AddNodeSelectorIndexer(nodeInformer cache.SharedIndexInformer, indexerName string, nodeSelector labels.Selector) error {
return nodeInformer.AddIndexers(cache.Indexers{
indexerName: func(obj interface{}) ([]string, error) {
node, ok := obj.(*v1.Node)
if !ok {
return []string{}, errors.New("unexpected object")
}
if nodeSelector.Matches(labels.Set(node.Labels)) {
return []string{indexerName}, nil
}
return []string{}, nil
},
})
}
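A registration sketch, assuming the caller parses a policy-level node selector once at startup; the indexer name "policyNodeSelector" and the wrapper function are illustrative:
func registerNodeSelectorIndexer(nodeInformer cache.SharedIndexInformer, rawSelector string) error {
	selector, err := labels.Parse(rawSelector)
	if err != nil {
		return err
	}
	// Matching nodes are indexed under the indexer name itself, so a later
	// ByIndex("policyNodeSelector", "policyNodeSelector") returns exactly them.
	return AddNodeSelectorIndexer(nodeInformer, "policyNodeSelector", selector)
}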

View File

@@ -19,15 +19,20 @@ package node
import (
"context"
"errors"
"sort"
"strings"
"sync"
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
@@ -78,13 +83,205 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)
// First verify nodeLister returns non-empty list
allNodes, err := nodeLister.List(labels.Everything())
if err != nil {
t.Fatalf("Failed to list nodes from nodeLister: %v", err)
}
if len(allNodes) == 0 {
t.Fatal("Expected nodeLister to return non-empty list of nodes")
}
if len(allNodes) != 2 {
t.Errorf("Expected nodeLister to return 2 nodes, got %d", len(allNodes))
}
// Now test ReadyNodes
nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)
if nodes[0].Name != "node1" {
if len(nodes) != 1 {
t.Errorf("Expected 1 node, got %d", len(nodes))
} else if nodes[0].Name != "node1" {
t.Errorf("Expected node1, got %s", nodes[0].Name)
}
}
func TestReadyNodesFromInterfaces(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
node3 := test.BuildTestNode("node3", 1000, 2000, 9, nil)
tests := []struct {
description string
nodeInterfaces []interface{}
expectedCount int
expectedNames []string
expectError bool
errorContains string
}{
{
description: "All nodes are ready",
nodeInterfaces: []interface{}{node1, node3},
expectedCount: 2,
expectedNames: []string{"node1", "node3"},
expectError: false,
},
{
description: "One node is not ready",
nodeInterfaces: []interface{}{node1, node2, node3},
expectedCount: 2,
expectedNames: []string{"node1", "node3"},
expectError: false,
},
{
description: "Empty list",
nodeInterfaces: []interface{}{},
expectedCount: 0,
expectedNames: []string{},
expectError: false,
},
{
description: "Invalid type in list",
nodeInterfaces: []interface{}{node1, "not a node", node3},
expectedCount: 0,
expectError: true,
errorContains: "item at index 1 is not a *v1.Node",
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
nodes, err := ReadyNodesFromInterfaces(tc.nodeInterfaces)
if tc.expectError {
if err == nil {
t.Errorf("Expected error but got none")
} else if tc.errorContains != "" && !strings.Contains(err.Error(), tc.errorContains) {
t.Errorf("Expected error to contain '%s', got '%s'", tc.errorContains, err.Error())
}
return
}
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if len(nodes) != tc.expectedCount {
t.Errorf("Expected %d nodes, got %d", tc.expectedCount, len(nodes))
}
for i, expectedName := range tc.expectedNames {
if i >= len(nodes) {
t.Errorf("Missing node at index %d, expected %s", i, expectedName)
continue
}
if nodes[i].Name != expectedName {
t.Errorf("Expected node at index %d to be %s, got %s", i, expectedName, nodes[i].Name)
}
}
})
}
}
func TestAddNodeSelectorIndexer(t *testing.T) {
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
node1.Labels = map[string]string{"type": "compute", "zone": "us-east-1"}
node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
node2.Labels = map[string]string{"type": "infra", "zone": "us-west-1"}
node3 := test.BuildTestNode("node3", 1000, 2000, 9, nil)
node3.Labels = map[string]string{"type": "compute", "zone": "us-west-1"}
tests := []struct {
description string
indexerName string
selectorString string
expectedMatches []string
}{
{
description: "Index nodes by type=compute",
indexerName: "computeNodes",
selectorString: "type=compute",
expectedMatches: []string{"node1", "node3"},
},
{
description: "Index nodes by type=infra",
indexerName: "infraNodes",
selectorString: "type=infra",
expectedMatches: []string{"node2"},
},
{
description: "Index nodes by zone=us-west-1",
indexerName: "westZoneNodes",
selectorString: "zone=us-west-1",
expectedMatches: []string{"node2", "node3"},
},
{
description: "Index nodes with multiple labels",
indexerName: "computeEastNodes",
selectorString: "type=compute,zone=us-east-1",
expectedMatches: []string{"node1"},
},
{
description: "No matching nodes",
indexerName: "noMatchNodes",
selectorString: "type=storage",
expectedMatches: []string{},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
fakeClient := fake.NewSimpleClientset(node1, node2, node3)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes().Informer()
selector, err := labels.Parse(tc.selectorString)
if err != nil {
t.Fatalf("Failed to parse selector: %v", err)
}
err = AddNodeSelectorIndexer(nodeInformer, tc.indexerName, selector)
if err != nil {
t.Fatalf("AddNodeSelectorIndexer failed: %v", err)
}
stopChannel := make(chan struct{})
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
defer close(stopChannel)
indexer := nodeInformer.GetIndexer()
objs, err := indexer.ByIndex(tc.indexerName, tc.indexerName)
if err != nil {
t.Errorf("Failed to query indexer: %v", err)
return
}
// Extract node names from the results
actualMatches := make([]string, 0, len(objs))
for _, obj := range objs {
node, ok := obj.(*v1.Node)
if !ok {
t.Errorf("Expected *v1.Node, got %T", obj)
continue
}
actualMatches = append(actualMatches, node.Name)
}
// Sort both slices for consistent comparison
sort.Strings(actualMatches)
expectedMatches := make([]string, len(tc.expectedMatches))
copy(expectedMatches, tc.expectedMatches)
sort.Strings(expectedMatches)
// Compare using cmp.Diff
if diff := cmp.Diff(expectedMatches, actualMatches); diff != "" {
t.Errorf("Node matches mismatch (-want +got):\n%s", diff)
}
})
}
}
func TestIsNodeUnschedulable(t *testing.T) {
tests := []struct {
description string
@@ -1020,6 +1217,64 @@ func TestNodeFit(t *testing.T) {
node: node,
podsOnNode: []*v1.Pod{},
},
{
description: "Pod with native sidecars with too much cpu does not fit on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
Resources: v1.ResourceRequirements{
Requests: createResourceList(100000, 100*1000*1000, 0),
},
})
}),
node: node,
podsOnNode: []*v1.Pod{},
err: errors.New("insufficient cpu"),
},
{
description: "Pod with native sidecars with too much memory does not fit on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
Resources: v1.ResourceRequirements{
Requests: createResourceList(100, 1000*1000*1000*1000, 0),
},
})
}),
node: node,
podsOnNode: []*v1.Pod{},
err: errors.New("insufficient memory"),
},
{
description: "Pod with small native sidecars fits on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
Resources: v1.ResourceRequirements{
Requests: createResourceList(100, 100*1000*1000, 0),
},
})
}),
node: node,
podsOnNode: []*v1.Pod{},
},
{
description: "Pod with large overhead does not fit on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
}),
node: node,
podsOnNode: []*v1.Pod{},
err: errors.New("insufficient cpu"),
},
{
description: "Pod with small overhead fits on node",
pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
}),
node: node,
podsOnNode: []*v1.Pod{},
},
}
for _, tc := range tests {

View File

@@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/utils"
)
@@ -254,14 +255,32 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
return false
}
if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
if IsBestEffortPod(pods[i]) {
iIsBestEffortPod := IsBestEffortPod(pods[i])
jIsBestEffortPod := IsBestEffortPod(pods[j])
iIsBurstablePod := IsBurstablePod(pods[i])
jIsBurstablePod := IsBurstablePod(pods[j])
iIsGuaranteedPod := IsGuaranteedPod(pods[i])
jIsGuaranteedPod := IsGuaranteedPod(pods[j])
if (iIsBestEffortPod && jIsBestEffortPod) || (iIsBurstablePod && jIsBurstablePod) || (iIsGuaranteedPod && jIsGuaranteedPod) {
iHasNoEvictionPolicy := evictionutils.HaveNoEvictionAnnotation(pods[i])
jHasNoEvictionPolicy := evictionutils.HaveNoEvictionAnnotation(pods[j])
if !iHasNoEvictionPolicy {
return true
}
if !jHasNoEvictionPolicy {
return false
}
return true
}
if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
if iIsBestEffortPod {
return true
}
if iIsBurstablePod && jIsGuaranteedPod {
return true
}
return false
}
return *pods[i].Spec.Priority < *pods[j].Spec.Priority
})
}
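For reference, a sketch of how a strategy typically consumes this ordering; the helper name is illustrative and the actual eviction call is elided:
func evictionCandidatesLowToHigh(podsOnNode []*v1.Pod) []*v1.Pod {
	candidates := append([]*v1.Pod{}, podsOnNode...)
	// Lower-priority, best-effort pods without the prefer-no-eviction annotation
	// sort first, so callers walking the slice try the cheapest evictions first.
	SortPodsBasedOnPriorityLowToHigh(candidates)
	return candidates
}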

View File

@@ -117,6 +117,14 @@ func TestListPodsOnANode(t *testing.T) {
}
}
func getPodListNames(pods []*v1.Pod) []string {
names := []string{}
for _, pod := range pods {
names = append(names, pod.Name)
}
return names
}
func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
@@ -149,11 +157,70 @@ func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
p6.Spec.Priority = nil
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
p7 := test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
test.SetPodPriority(pod, lowPriority)
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
}
})
// BestEffort
p8 := test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
test.SetPodPriority(pod, highPriority)
test.MakeBestEffortPod(pod)
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
}
})
// Burstable
p9 := test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
test.SetPodPriority(pod, highPriority)
test.MakeBurstablePod(pod)
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
}
})
// Guaranteed
p10 := test.BuildTestPod("p10", 400, 100, n1.Name, func(pod *v1.Pod) {
test.SetPodPriority(pod, highPriority)
test.MakeGuaranteedPod(pod)
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
}
})
// Burstable
p11 := test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
test.MakeBurstablePod(pod)
})
// Burstable
p12 := test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
test.MakeBurstablePod(pod)
pod.Annotations = map[string]string{
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
}
})
podList := []*v1.Pod{p1, p8, p9, p10, p2, p3, p4, p5, p6, p7, p11, p12}
// p5: no priority, best effort
// p11: no priority, burstable
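// p12: no priority, burstable, prefer-no-eviction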
// p6: no priority, guaranteed
// p1: low priority
// p7: low priority, prefer-no-eviction
// p2: high priority, best effort
// p8: high priority, best effort, prefer-no-eviction
// p3: high priority, burstable
// p9: high priority, burstable, prefer-no-eviction
// p4: high priority, guaranteed
// p10: high priority, guaranteed, prefer-no-eviction
expectedPodList := []*v1.Pod{p5, p11, p12, p6, p1, p7, p2, p8, p3, p9, p4, p10}
SortPodsBasedOnPriorityLowToHigh(podList)
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
if !reflect.DeepEqual(getPodListNames(podList), getPodListNames(expectedPodList)) {
t.Errorf("Pods were sorted in an unexpected order: %v, expected %v", getPodListNames(podList), getPodListNames(expectedPodList))
}
}

View File

@@ -108,6 +108,10 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
IgnorePvcPods: false,
EvictFailedBarePods: false,
IgnorePodsWithoutPDB: false,
PodProtections: defaultevictor.PodProtections{
DefaultDisabled: []defaultevictor.PodProtection{},
ExtraEnabled: []defaultevictor.PodProtection{},
},
},
}
@@ -172,13 +176,8 @@ func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginr
} else {
if prometheusConfig.Prometheus.URL == "" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL is required when prometheus is enabled"))
} else {
u, err := url.Parse(prometheusConfig.Prometheus.URL)
if err != nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
} else if u.Scheme != "https" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL's scheme is not https, got %q instead", u.Scheme))
}
} else if _, err := url.Parse(prometheusConfig.Prometheus.URL); err != nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
}
if prometheusConfig.Prometheus.AuthToken != nil {

View File

@@ -22,6 +22,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/conversion"
fakeclientset "k8s.io/client-go/kubernetes/fake"
utilptr "k8s.io/utils/ptr"
@@ -259,20 +260,6 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
},
result: fmt.Errorf("error parsing prometheus URL: parse \"http://example.com:-80\": invalid port \":-80\" after host"),
},
{
description: "prometheus url does not have https error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "http://example.com:80",
},
},
},
},
result: fmt.Errorf("prometheus URL's scheme is not https, got \"http\" instead"),
},
{
description: "prometheus authtoken with no secret reference error",
deschedulerPolicy: api.DeschedulerPolicy{
@@ -510,6 +497,313 @@ profiles:
},
},
},
{
description: "test DisabledDefaultPodProtections configuration",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
defaultDisabled:
- "PodsWithLocalStorage"
- "DaemonSetPods"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "ProfileName",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PodProtections: defaultevictor.PodProtections{
DefaultDisabled: []defaultevictor.PodProtection{
defaultevictor.PodsWithLocalStorage,
defaultevictor.DaemonSetPods,
},
},
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
NodeFit: true,
},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
},
},
},
{
description: "test podProtections extraEnabled configuration",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
extraEnabled:
- "PodsWithPVC"
- "PodsWithoutPDB"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "ProfileName",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PodProtections: defaultevictor.PodProtections{
ExtraEnabled: []defaultevictor.PodProtection{
defaultevictor.PodsWithPVC,
defaultevictor.PodsWithoutPDB,
},
},
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
NodeFit: true,
},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
},
},
},
{
description: "test both ExtraPodProtections and DisabledDefaultPodProtections configuration",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
extraEnabled:
- "PodsWithPVC"
- "PodsWithoutPDB"
defaultDisabled:
- "PodsWithLocalStorage"
- "DaemonSetPods"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "ProfileName",
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
PodProtections: defaultevictor.PodProtections{
ExtraEnabled: []defaultevictor.PodProtection{
defaultevictor.PodsWithPVC,
defaultevictor.PodsWithoutPDB,
},
DefaultDisabled: []defaultevictor.PodProtection{
defaultevictor.PodsWithLocalStorage,
defaultevictor.DaemonSetPods,
},
},
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
NodeFit: true,
},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
},
},
},
},
{
description: "test error when using both Deprecated fields and DisabledDefaultPodProtections/ExtraPodProtections",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
evictSystemCriticalPods: true
podProtections:
extraEnabled:
- "PodsWithPVC"
- "PodsWithoutPDB"
defaultDisabled:
- "PodsWithLocalStorage"
- "DaemonSetPods"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: nil,
err: fmt.Errorf("in profile ProfileName: cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
},
{
description: "test error when Disables a default protection that does not exist",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
defaultDisabled:
- "InvalidProtection"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: nil,
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"InvalidProtection\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
},
{
description: "test error when Enables an extra protection that does not exist",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
extraEnabled:
- "InvalidProtection"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: nil,
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"InvalidProtection\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
},
{
description: "test error when Disables an extra protection",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
defaultDisabled:
- "PodsWithPVC"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: nil,
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"PodsWithPVC\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
},
{
description: "test error when Enables a default protection",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
podProtections:
extraEnabled:
- "DaemonSetPods"
priorityThreshold:
value: 2000000000
nodeFit: true
plugins:
filter:
enabled:
- "DefaultEvictor"
preEvictionFilter:
enabled:
- "DefaultEvictor"
`),
result: nil,
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"DaemonSetPods\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
},
}
for _, tc := range testCases {
@@ -517,14 +811,14 @@ profiles:
result, err := decode("filename", tc.policy, client, pluginregistry.PluginRegistry)
if err != nil {
if tc.err == nil {
t.Errorf("unexpected error: %s.", err.Error())
} else {
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
t.Fatalf("unexpected error: %s.", err.Error())
} else if err.Error() != tc.err.Error() {
t.Fatalf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
}
}
diff := cmp.Diff(tc.result, result)
if diff != "" && err == nil {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
if diff != "" {
t.Fatalf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
})
}

View File

@@ -23,6 +23,7 @@ type HandleImpl struct {
PodEvictorImpl *evictions.PodEvictor
MetricsCollectorImpl *metricscollector.MetricsCollector
PrometheusClientImpl promapi.Client
PluginInstanceIDImpl string
}
var _ frameworktypes.Handle = &HandleImpl{}
@@ -62,3 +63,7 @@ func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) error {
return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
}
func (hi *HandleImpl) PluginInstanceID() string {
return hi.PluginInstanceIDImpl
}

View File

@@ -60,7 +60,7 @@ type FakePlugin struct {
}
func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakePluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
@@ -73,8 +73,24 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
}
}
func NewPluginFncFromFakeWithReactor(fp *FakePlugin, callback func(ActionImpl)) pluginregistry.PluginBuilder {
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakePluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
}
fp.handle = handle
fp.args = fakePluginArgs
callback(ActionImpl{handle: fp.handle})
return fp, nil
}
}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakePluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
@@ -165,7 +181,7 @@ type FakeDeschedulePlugin struct {
}
func NewFakeDeschedulePluginFncFromFake(fp *FakeDeschedulePlugin) pluginregistry.PluginBuilder {
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakeDeschedulePluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakeDeschedulePluginArgs, got %T", args)
@@ -252,7 +268,7 @@ type FakeBalancePlugin struct {
}
func NewFakeBalancePluginFncFromFake(fp *FakeBalancePlugin) pluginregistry.PluginBuilder {
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakeBalancePluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakeBalancePluginArgs, got %T", args)
@@ -339,7 +355,7 @@ type FakeFilterPlugin struct {
}
func NewFakeFilterPluginFncFromFake(fp *FakeFilterPlugin) pluginregistry.PluginBuilder {
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
fakePluginArgs, ok := args.(*FakeFilterPluginArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type FakeFilterPluginArgs, got %T", args)
@@ -408,3 +424,55 @@ func (d *FakeFilterPlugin) handleBoolAction(action Action) bool {
}
panic(fmt.Errorf("unhandled %q action", action.GetExtensionPoint()))
}
// RegisterFakePlugin registers a FakePlugin with the given registry
func RegisterFakePlugin(name string, plugin *FakePlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewPluginFncFromFake(plugin),
&FakePlugin{},
&FakePluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
// RegisterFakeDeschedulePlugin registers a FakeDeschedulePlugin with the given registry
func RegisterFakeDeschedulePlugin(name string, plugin *FakeDeschedulePlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewFakeDeschedulePluginFncFromFake(plugin),
&FakeDeschedulePlugin{},
&FakeDeschedulePluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
// RegisterFakeBalancePlugin registers a FakeBalancePlugin with the given registry
func RegisterFakeBalancePlugin(name string, plugin *FakeBalancePlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewFakeBalancePluginFncFromFake(plugin),
&FakeBalancePlugin{},
&FakeBalancePluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
// RegisterFakeFilterPlugin registers a FakeFilterPlugin with the given registry
func RegisterFakeFilterPlugin(name string, plugin *FakeFilterPlugin, registry pluginregistry.Registry) {
pluginregistry.Register(
name,
NewFakeFilterPluginFncFromFake(plugin),
&FakeFilterPlugin{},
&FakeFilterPluginArgs{},
ValidateFakePluginArgs,
SetDefaults_FakePluginArgs,
registry,
)
}
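A minimal sketch of a plugin constructor matching the new context-aware PluginBuilder signature used by these fakes; the builder name and its no-op behaviour are illustrative:
func NewNoopPluginBuilder() pluginregistry.PluginBuilder {
	return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
		// The added ctx lets constructors derive a scoped logger or honour
		// cancellation; this no-op builder simply records the handle.
		_ = ctx
		return &FakePlugin{handle: handle}, nil
	}
}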

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -17,6 +17,8 @@ limitations under the License.
package pluginregistry
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -35,7 +37,7 @@ type PluginUtilities struct {
PluginArgDefaulter PluginArgDefaulter
}
type PluginBuilder = func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
type PluginBuilder = func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
type (
PluginArgValidator = func(args runtime.Object) error

View File

@@ -0,0 +1,94 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultevictor
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)
func evictionConstraintsForLabelSelector(logger klog.Logger, labelSelector *metav1.LabelSelector) ([]constraint, error) {
if labelSelector != nil {
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
logger.Error(err, "could not get selector from label selector")
return nil, err
}
if !selector.Empty() {
return []constraint{
func(pod *v1.Pod) error {
if !selector.Matches(labels.Set(pod.Labels)) {
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
}
return nil
},
}, nil
}
}
return nil, nil
}
func evictionConstraintsForMinReplicas(logger klog.Logger, minReplicas uint, handle frameworktypes.Handle) ([]constraint, error) {
if minReplicas > 1 {
indexName := "metadata.ownerReferences"
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
if err != nil {
logger.Error(err, "could not get pod indexer by ownerRefs")
return nil, err
}
return []constraint{
func(pod *v1.Pod) error {
if len(pod.OwnerReferences) == 0 {
return nil
}
if len(pod.OwnerReferences) > 1 {
logger.V(5).Info("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
return nil
}
ownerRef := pod.OwnerReferences[0]
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
if err != nil {
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
}
if uint(len(objs)) < minReplicas {
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), minReplicas)
}
return nil
},
}, nil
}
return nil, nil
}
func evictionConstraintsForMinPodAge(minPodAge *metav1.Duration) []constraint {
if minPodAge != nil {
return []constraint{
func(pod *v1.Pod) error {
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < minPodAge.Duration {
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", minPodAge.String())
}
return nil
},
}
}
return nil
}
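For orientation, a sketch of how a constraint slice built from the helpers above would typically be evaluated; the DefaultEvictor's real filter loop is outside this diff and the function name is illustrative:
func firstFailingConstraint(pod *v1.Pod, constraints []constraint) error {
	// A pod is evictable only if every constraint accepts it; the first
	// violation short-circuits the check and is reported to the caller.
	for _, c := range constraints {
		if err := c(pod); err != nil {
			return err
		}
	}
	return nil
}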

View File

@@ -14,19 +14,20 @@ limitations under the License.
package defaultevictor
import (
// "context"
"context"
"errors"
"fmt"
"time"
"maps"
"slices"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
@@ -48,6 +49,7 @@ type constraint func(pod *v1.Pod) error
// This plugin is only meant to customize other actions (extension points) of the evictor,
// like filtering, sorting, and other ones that might be relevant in the future
type DefaultEvictor struct {
logger klog.Logger
args *DefaultEvictorArgs
constraints []constraint
handle frameworktypes.Handle
@@ -66,150 +68,326 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
// New builds plugin from its arguments while passing a handle
// nolint: gocyclo
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
ev := &DefaultEvictor{
logger: logger,
handle: handle,
args: defaultEvictorArgs,
}
// add constraints
err := ev.addAllConstraints(logger, handle)
if err != nil {
return nil, err
}
return ev, nil
}
if defaultEvictorArgs.EvictFailedBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
func (d *DefaultEvictor) addAllConstraints(logger klog.Logger, handle frameworktypes.Handle) error {
args := d.args
// Determine effective protected policies based on the provided arguments.
effectivePodProtections := getEffectivePodProtections(args)
if err := applyEffectivePodProtections(d, effectivePodProtections, handle); err != nil {
return fmt.Errorf("failed to apply effective protected policies: %w", err)
}
if constraints, err := evictionConstraintsForLabelSelector(logger, args.LabelSelector); err != nil {
return err
} else {
d.constraints = append(d.constraints, constraints...)
}
if constraints, err := evictionConstraintsForMinReplicas(logger, args.MinReplicas, handle); err != nil {
return err
} else {
d.constraints = append(d.constraints, constraints...)
}
d.constraints = append(d.constraints, evictionConstraintsForMinPodAge(args.MinPodAge)...)
return nil
}
// applyEffectivePodProtections configures the evictor with specified Pod protection.
func applyEffectivePodProtections(d *DefaultEvictor, podProtections []PodProtection, handle frameworktypes.Handle) error {
protectionMap := make(map[PodProtection]bool, len(podProtections))
for _, protection := range podProtections {
protectionMap[protection] = true
}
// Apply protections
if err := applySystemCriticalPodsProtection(d, protectionMap, handle); err != nil {
return err
}
applyFailedBarePodsProtection(d, protectionMap)
applyLocalStoragePodsProtection(d, protectionMap)
applyDaemonSetPodsProtection(d, protectionMap)
applyPVCPodsProtection(d, protectionMap)
applyPodsWithoutPDBProtection(d, protectionMap, handle)
applyPodsWithResourceClaimsProtection(d, protectionMap)
return nil
}
// protectedPVCStorageClasses returns the list of storage classes that should
// be protected from eviction. If the list is empty or nil then all storage
// classes are protected (assuming PodsWithPVC protection is enabled).
func protectedPVCStorageClasses(d *DefaultEvictor) []ProtectedStorageClass {
protcfg := d.args.PodProtections.Config
if protcfg == nil {
return nil
}
scconfig := protcfg.PodsWithPVC
if scconfig == nil {
return nil
}
return scconfig.ProtectedStorageClasses
}
// podStorageClasses returns a list of storage classes referred by a pod. We
// need this when assessing if a pod should be protected because it refers to a
// protected storage class.
func podStorageClasses(inf informers.SharedInformerFactory, pod *v1.Pod) ([]string, error) {
lister := inf.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(
pod.Namespace,
)
referred := map[string]bool{}
for _, vol := range pod.Spec.Volumes {
if vol.PersistentVolumeClaim == nil {
continue
}
claim, err := lister.Get(vol.PersistentVolumeClaim.ClaimName)
if err != nil {
return nil, fmt.Errorf(
"failed to get persistent volume claim %q/%q: %w",
pod.Namespace, vol.PersistentVolumeClaim.ClaimName, err,
)
}
// this should never happen as once a pvc is created with a nil
// storageClass it is automatically picked up by the default
// storage class. By returning an error here we make the pod
// protected from eviction.
if claim.Spec.StorageClassName == nil || *claim.Spec.StorageClassName == "" {
return nil, fmt.Errorf(
"failed to resolve storage class for pod %q/%q",
pod.Namespace, claim.Name,
)
}
referred[*claim.Spec.StorageClassName] = true
}
return slices.Collect(maps.Keys(referred)), nil
}
func applyFailedBarePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
isProtectionEnabled := protectionMap[FailedBarePods]
if !isProtectionEnabled {
d.logger.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Enable evictFailedBarePods to evict bare pods in failed phase
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
}
return nil
})
} else {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
if len(ownerRefList) == 0 {
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
if len(podutil.OwnerRef(pod)) == 0 {
return fmt.Errorf("pod does not have any ownerRefs")
}
return nil
})
}
if !defaultEvictorArgs.EvictSystemCriticalPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsCriticalPriorityPod(pod) {
return fmt.Errorf("pod has system critical priority")
}
return nil
})
}
if defaultEvictorArgs.PriorityThreshold != nil && (defaultEvictorArgs.PriorityThreshold.Value != nil || len(defaultEvictorArgs.PriorityThreshold.Name) > 0) {
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), defaultEvictorArgs.PriorityThreshold)
if err != nil {
return nil, fmt.Errorf("failed to get priority threshold: %v", err)
}
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
return nil
}
return fmt.Errorf("pod has higher priority than specified priority class threshold")
})
}
} else {
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
func applySystemCriticalPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) error {
isProtectionEnabled := protectionMap[SystemCriticalPods]
if !isProtectionEnabled {
d.logger.V(1).Info("Warning: System critical pod protection is disabled. This could cause eviction of Kubernetes system pods.")
return nil
}
if !defaultEvictorArgs.EvictLocalStoragePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithLocalStorage(pod) {
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
if utils.IsCriticalPriorityPod(pod) {
return fmt.Errorf("pod has system critical priority and is protected against eviction")
}
return nil
})
priorityThreshold := d.args.PriorityThreshold
if priorityThreshold != nil && (priorityThreshold.Value != nil || len(priorityThreshold.Name) > 0) {
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), priorityThreshold)
if err != nil {
d.logger.Error(err, "failed to get priority threshold")
return err
}
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
if !IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
return fmt.Errorf("pod has higher priority than specified priority class threshold")
}
return nil
})
}
if !defaultEvictorArgs.EvictDaemonSetPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
return nil
}
func applyLocalStoragePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
isProtectionEnabled := protectionMap[PodsWithLocalStorage]
if isProtectionEnabled {
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithLocalStorage(pod) {
return fmt.Errorf("pod has local storage and is protected against eviction")
}
return nil
})
}
}
func applyDaemonSetPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
isProtectionEnabled := protectionMap[DaemonSetPods]
if isProtectionEnabled {
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
if utils.IsDaemonsetPod(ownerRefList) {
return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
return fmt.Errorf("daemonset pods are protected against eviction")
}
return nil
})
}
if defaultEvictorArgs.IgnorePvcPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
}
return nil
})
}
selector, err := metav1.LabelSelectorAsSelector(defaultEvictorArgs.LabelSelector)
if err != nil {
return nil, fmt.Errorf("could not get selector from label selector")
}
if defaultEvictorArgs.LabelSelector != nil && !selector.Empty() {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if !selector.Matches(labels.Set(pod.Labels)) {
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
}
return nil
})
}
// applyPVCPodsProtection protects pods that refer to a PVC from eviction. If
// the user has specified a list of storage classes to protect then only pods
// referring to PVCs of those storage classes are protected.
func applyPVCPodsProtection(d *DefaultEvictor, enabledProtections map[PodProtection]bool) {
if !enabledProtections[PodsWithPVC] {
return
}
// if the user isn't filtering by storage classes we protect all pods
// referring to a PVC.
protected := protectedPVCStorageClasses(d)
if len(protected) == 0 {
d.constraints = append(
d.constraints,
func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
return fmt.Errorf("pod with PVC is protected against eviction")
}
return nil
},
)
return
}
protectedsc := map[string]bool{}
for _, class := range protected {
protectedsc[class.Name] = true
}
d.constraints = append(
d.constraints, func(pod *v1.Pod) error {
classes, err := podStorageClasses(d.handle.SharedInformerFactory(), pod)
if err != nil {
return err
}
for _, class := range classes {
if !protectedsc[class] {
continue
}
return fmt.Errorf("pod using protected storage class %q", class)
}
return nil
},
)
}
func applyPodsWithoutPDBProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) {
isProtectionEnabled := protectionMap[PodsWithoutPDB]
if isProtectionEnabled {
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
if err != nil {
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
}
if !hasPdb {
return fmt.Errorf("pod does not have a PodDisruptionBudget and is protected against eviction")
}
return nil
})
}
}
func applyPodsWithResourceClaimsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
isProtectionEnabled := protectionMap[PodsWithResourceClaims]
if isProtectionEnabled {
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithResourceClaims(pod) {
return fmt.Errorf("pod has ResourceClaims and descheduler is configured to protect ResourceClaims pods")
}
return nil
})
}
}
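Each apply function above consults the resolved protection map before appending its constraint. A minimal sketch of how they could be chained when the plugin is constructed follows; the buildConstraints helper name and its call order are illustrative assumptions, not part of this diff.
// Illustrative glue (assumed, not from the diff): resolve the effective
// protections once, then let each apply function decide whether to add its
// constraint. Only the system-critical protection can fail, because it may
// need to resolve a priority class threshold via the API.
func buildConstraints(d *DefaultEvictor, handle frameworktypes.Handle) error {
    enabled := make(map[PodProtection]bool)
    for _, p := range getEffectivePodProtections(d.args) {
        enabled[p] = true
    }
    applyFailedBarePodsProtection(d, enabled)
    if err := applySystemCriticalPodsProtection(d, enabled, handle); err != nil {
        return err
    }
    applyLocalStoragePodsProtection(d, enabled)
    applyDaemonSetPodsProtection(d, enabled)
    applyPVCPodsProtection(d, enabled)
    applyPodsWithoutPDBProtection(d, enabled, handle)
    applyPodsWithResourceClaimsProtection(d, enabled)
    return nil
}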
// getEffectivePodProtections determines which policies are currently active.
// It supports both new-style (PodProtections) and legacy-style flags.
func getEffectivePodProtections(args *DefaultEvictorArgs) []PodProtection {
// determine whether to use PodProtections config
useNewConfig := len(args.PodProtections.DefaultDisabled) > 0 || len(args.PodProtections.ExtraEnabled) > 0
if !useNewConfig {
// fall back to the Deprecated config
return legacyGetPodProtections(args)
}
// effective is the final list of active protections.
effective := make([]PodProtection, 0)
effective = append(effective, defaultPodProtections...)
// Remove PodProtections that are in the DefaultDisabled list.
effective = slices.DeleteFunc(effective, func(protection PodProtection) bool {
return slices.Contains(args.PodProtections.DefaultDisabled, protection)
})
// Add extra enabled in PodProtections
effective = append(effective, args.PodProtections.ExtraEnabled...)
return effective
}
// legacyGetPodProtections returns protections using deprecated boolean flags.
func legacyGetPodProtections(args *DefaultEvictorArgs) []PodProtection {
var protections []PodProtection
// defaultDisabled
if !args.EvictLocalStoragePods {
protections = append(protections, PodsWithLocalStorage)
}
if !args.EvictDaemonSetPods {
protections = append(protections, DaemonSetPods)
}
if !args.EvictSystemCriticalPods {
protections = append(protections, SystemCriticalPods)
}
if !args.EvictFailedBarePods {
protections = append(protections, FailedBarePods)
}
// extraEnabled
if args.IgnorePvcPods {
protections = append(protections, PodsWithPVC)
}
if args.IgnorePodsWithoutPDB {
protections = append(protections, PodsWithoutPDB)
}
return protections
}
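To make the legacy fallback concrete, here is a small hedged example (the exampleLegacyResolution wrapper is illustrative only): when no PodProtections are configured, getEffectivePodProtections delegates to legacyGetPodProtections and the deprecated booleans map onto protections as shown.
// Illustrative only: with an empty PodProtections block, the deprecated
// booleans decide which protections stay active.
func exampleLegacyResolution() {
    args := &DefaultEvictorArgs{
        EvictLocalStoragePods: true, // drops PodsWithLocalStorage from the defaults
        IgnorePvcPods:         true, // adds PodsWithPVC as an extra protection
    }
    // Expected: [DaemonSetPods SystemCriticalPods FailedBarePods PodsWithPVC]
    fmt.Println(getEffectivePodProtections(args))
}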
// Name retrieves the plugin name
@@ -218,14 +396,15 @@ func (d *DefaultEvictor) Name() string {
}
func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.PreEvictionFilterExtensionPoint)
if d.args.NodeFit {
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
if err != nil {
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
logger.Error(err, "unable to list ready nodes", "pod", klog.KObj(pod))
return false
}
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
logger.Info("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
return false
}
return true
@@ -234,12 +413,17 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
}
func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.FilterExtensionPoint)
checkErrs := []error{}
if HaveEvictAnnotation(pod) {
return true
}
if d.args.NoEvictionPolicy == MandatoryNoEvictionPolicy && evictionutils.HaveNoEvictionAnnotation(pod) {
return false
}
if utils.IsMirrorPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
}
@@ -259,7 +443,7 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
}
if len(checkErrs) > 0 {
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
logger.V(4).Info("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
return false
}

File diff suppressed because it is too large Load Diff

View File

@@ -21,35 +21,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_DefaultEvictorArgs
// TODO: the final default values would be discussed in community
// SetDefaults_DefaultEvictorArgs sets the default values for the
// DefaultEvictorArgs configuration.
func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
args := obj.(*DefaultEvictorArgs)
if args.NodeSelector == "" {
args.NodeSelector = ""
}
if !args.EvictLocalStoragePods {
args.EvictLocalStoragePods = false
}
if !args.EvictDaemonSetPods {
args.EvictDaemonSetPods = false
}
if !args.EvictSystemCriticalPods {
args.EvictSystemCriticalPods = false
}
if !args.IgnorePvcPods {
args.IgnorePvcPods = false
}
if !args.EvictFailedBarePods {
args.EvictFailedBarePods = false
}
if args.LabelSelector == nil {
args.LabelSelector = nil
}
if args.PriorityThreshold == nil {
args.PriorityThreshold = nil
}
if !args.NodeFit {
args.NodeFit = false
}
}

View File

@@ -17,6 +17,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilptr "k8s.io/utils/ptr"

View File

@@ -25,16 +25,132 @@ import (
type DefaultEvictorArgs struct {
metav1.TypeMeta `json:",inline"`
NodeSelector string `json:"nodeSelector,omitempty"`
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
NodeFit bool `json:"nodeFit,omitempty"`
MinReplicas uint `json:"minReplicas,omitempty"`
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
NodeSelector string `json:"nodeSelector,omitempty"`
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
NodeFit bool `json:"nodeFit,omitempty"`
MinReplicas uint `json:"minReplicas,omitempty"`
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
NoEvictionPolicy NoEvictionPolicy `json:"noEvictionPolicy,omitempty"`
// PodProtections holds the list of enabled and disabled protection policies.
// Users can selectively disable certain default protection rules or enable extra ones.
PodProtections PodProtections `json:"podProtections,omitempty"`
// Deprecated: Use DisabledDefaultPodProtection with "PodsWithLocalStorage" instead.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
// Deprecated: Use DisabledDefaultPodProtection with "DaemonSetPods" instead.
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
// Deprecated: Use DisabledDefaultPodProtection with "SystemCriticalPods" instead.
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
// Deprecated: Use ExtraPodProtection with "PodsWithPVC" instead.
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
// Deprecated: Use ExtraPodProtection with "PodsWithoutPDB" instead.
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
// Deprecated: Use DisabledDefaultPodProtection with "FailedBarePods" instead.
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
}
// PodProtection defines the protection policy for a pod.
type PodProtection string
const (
PodsWithLocalStorage PodProtection = "PodsWithLocalStorage"
DaemonSetPods PodProtection = "DaemonSetPods"
SystemCriticalPods PodProtection = "SystemCriticalPods"
FailedBarePods PodProtection = "FailedBarePods"
PodsWithPVC PodProtection = "PodsWithPVC"
PodsWithoutPDB PodProtection = "PodsWithoutPDB"
PodsWithResourceClaims PodProtection = "PodsWithResourceClaims"
)
// PodProtections holds the list of enabled and disabled protection policies.
// NOTE: The list of default enabled pod protection policies is subject to change in future versions.
// +k8s:deepcopy-gen=true
type PodProtections struct {
// ExtraEnabled specifies additional protection policies that should be enabled.
// Supports: PodsWithPVC, PodsWithoutPDB
ExtraEnabled []PodProtection `json:"extraEnabled,omitempty"`
// DefaultDisabled specifies which default protection policies should be disabled.
// Supports: PodsWithLocalStorage, DaemonSetPods, SystemCriticalPods, FailedBarePods
DefaultDisabled []PodProtection `json:"defaultDisabled,omitempty"`
// Config holds configuration for pod protection policies. Depending on
// the enabled policies this may be required. For instance, when
// enabling the PodsWithPVC policy the user may specify which storage
// classes should be protected.
Config *PodProtectionsConfig `json:"config,omitempty"`
}
// PodProtectionsConfig holds configuration for pod protection policies. The
// name of the fields here must be equal to a protection name. This struct is
// meant to be extended as more protection policies are added.
// +k8s:deepcopy-gen=true
type PodProtectionsConfig struct {
PodsWithPVC *PodsWithPVCConfig `json:"PodsWithPVC,omitempty"`
}
// PodsWithPVCConfig holds configuration for the PodsWithPVC protection.
// +k8s:deepcopy-gen=true
type PodsWithPVCConfig struct {
// ProtectedStorageClasses is a list of storage classes that we want to
// protect. i.e. if a pod refers to one of these storage classes it is
// protected from being evicted. If none is provided then all pods with
// PVCs are protected from eviction.
ProtectedStorageClasses []ProtectedStorageClass `json:"protectedStorageClasses,omitempty"`
}
// ProtectedStorageClass is used to determine what storage classes are
// protected when the PodsWithPVC protection is enabled. This object exists
// so we can later on extend it with more configuration if needed.
type ProtectedStorageClass struct {
Name string `json:"name"`
}
// defaultPodProtections holds the list of protection policies that are enabled by default.
// Users can use the 'disabledDefaultPodProtections' evictor argument (via PodProtections.DefaultDisabled)
// to disable any of these default protections.
//
// The following four policies are included by default:
// - PodsWithLocalStorage: Protects pods with local storage.
// - DaemonSetPods: Protects DaemonSet managed pods.
// - SystemCriticalPods: Protects system-critical pods.
// - FailedBarePods: Protects failed bare pods (not part of any controller).
var defaultPodProtections = []PodProtection{
PodsWithLocalStorage,
SystemCriticalPods,
FailedBarePods,
DaemonSetPods,
}
// extraPodProtections holds a list of protection policies that the user can optionally enable
// through the configuration (via PodProtections.ExtraEnabled). These policies are not enabled by default.
//
// Currently supported extra policies:
// - PodsWithPVC: Protects pods using PersistentVolumeClaims.
// - PodsWithoutPDB: Protects pods lacking a PodDisruptionBudget.
// - PodsWithResourceClaims: Protects pods using ResourceClaims.
var extraPodProtections = []PodProtection{
PodsWithPVC,
PodsWithoutPDB,
PodsWithResourceClaims,
}
// NoEvictionPolicy dictates whether a no-eviction policy is preferred or mandatory.
// Needs to be used with caution as it gives users the ability to protect their pods
// from eviction, which might work against enforced policies, e.g. plugins evicting
// pods that violate security policies.
type NoEvictionPolicy string
const (
// PreferredNoEvictionPolicy interprets the no-eviction policy as a preference.
// Meaning the annotation will get ignored by the DefaultEvictor plugin.
// Yet, plugins may optionally sort their pods based on the annotation
// and focus on evicting pods that do not set the annotation.
PreferredNoEvictionPolicy NoEvictionPolicy = "Preferred"
// MandatoryNoEvictionPolicy interprets the no-eviction policy as mandatory.
// Every pod carrying the annotation will get excluded from eviction.
MandatoryNoEvictionPolicy NoEvictionPolicy = "Mandatory"
)
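A sketch of the new-style configuration expressed directly against the types above; the examplePodProtectionsArgs name and the "standard-rwo" storage class are illustrative placeholders, not values from this change.
// Disable two default protections, enable the PVC protection, and limit it to
// a single (example) storage class.
func examplePodProtectionsArgs() *DefaultEvictorArgs {
    return &DefaultEvictorArgs{
        NodeFit: true,
        PodProtections: PodProtections{
            DefaultDisabled: []PodProtection{PodsWithLocalStorage, DaemonSetPods},
            ExtraEnabled:    []PodProtection{PodsWithPVC},
            Config: &PodProtectionsConfig{
                PodsWithPVC: &PodsWithPVCConfig{
                    ProtectedStorageClasses: []ProtectedStorageClass{{Name: "standard-rwo"}},
                },
            },
        },
    }
}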

View File

@@ -15,22 +15,86 @@ package defaultevictor
import (
"fmt"
"k8s.io/klog/v2"
"slices"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/klog/v2"
)
func ValidateDefaultEvictorArgs(obj runtime.Object) error {
args := obj.(*DefaultEvictorArgs)
var allErrs []error
if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
allErrs = append(allErrs, fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"))
}
if args.MinReplicas == 1 {
klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
}
return nil
if args.NoEvictionPolicy != "" {
if args.NoEvictionPolicy != PreferredNoEvictionPolicy && args.NoEvictionPolicy != MandatoryNoEvictionPolicy {
allErrs = append(allErrs, fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}))
}
}
// check if any deprecated fields are set to true
hasDeprecatedFields := args.EvictLocalStoragePods || args.EvictDaemonSetPods ||
args.EvictSystemCriticalPods || args.IgnorePvcPods ||
args.EvictFailedBarePods || args.IgnorePodsWithoutPDB
// disallow mixing deprecated fields with PodProtections.ExtraEnabled and PodProtections.DefaultDisabled
if hasDeprecatedFields && (len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0) {
allErrs = append(allErrs, fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"))
}
if len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0 {
for _, policy := range args.PodProtections.ExtraEnabled {
if !slices.Contains(extraPodProtections, policy) {
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in ExtraEnabled: %q. Valid options are: %v",
string(policy), extraPodProtections))
}
}
for _, policy := range args.PodProtections.DefaultDisabled {
if !slices.Contains(defaultPodProtections, policy) {
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in DefaultDisabled: %q. Valid options are: %v",
string(policy), defaultPodProtections))
}
}
if hasDuplicates(args.PodProtections.DefaultDisabled) {
allErrs = append(allErrs, fmt.Errorf("PodProtections.DefaultDisabled contains duplicate entries"))
}
if hasDuplicates(args.PodProtections.ExtraEnabled) {
allErrs = append(allErrs, fmt.Errorf("PodProtections.ExtraEnabled contains duplicate entries"))
}
if slices.Contains(args.PodProtections.ExtraEnabled, PodsWithPVC) {
if args.PodProtections.Config != nil && args.PodProtections.Config.PodsWithPVC != nil {
protectedsc := args.PodProtections.Config.PodsWithPVC.ProtectedStorageClasses
for i, sc := range protectedsc {
if sc.Name == "" {
allErrs = append(allErrs, fmt.Errorf("PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[%d] name cannot be empty", i))
}
}
}
}
}
return utilerrors.NewAggregate(allErrs)
}
func hasDuplicates(slice []PodProtection) bool {
seen := make(map[PodProtection]struct{}, len(slice))
for _, item := range slice {
if _, exists := seen[item]; exists {
return true
}
seen[item] = struct{}{}
}
return false
}
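Callers receive a single aggregated error or nil: utilerrors.NewAggregate returns nil for an empty slice, so a clean configuration validates cleanly. A short illustrative call (exampleValidate is not part of the change):
// The duplicate entry and the unknown policy below are reported together in
// one aggregated error.
func exampleValidate() {
    err := ValidateDefaultEvictorArgs(&DefaultEvictorArgs{
        PodProtections: PodProtections{
            ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC, "NotAPolicy"},
        },
    })
    if err != nil {
        fmt.Println(err)
    }
}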

View File

@@ -0,0 +1,242 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaultevictor
import (
"fmt"
"testing"
"k8s.io/apimachinery/pkg/runtime"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
)
func TestValidateDefaultEvictorArgs(t *testing.T) {
tests := []struct {
name string
args *DefaultEvictorArgs
errInfo error
}{
{
name: "passing invalid priority",
args: &DefaultEvictorArgs{
PriorityThreshold: &api.PriorityThreshold{
Value: utilptr.To[int32](1),
Name: "priority-name",
},
},
errInfo: fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"),
},
{
name: "passing invalid no eviction policy",
args: &DefaultEvictorArgs{
NoEvictionPolicy: "invalid-no-eviction-policy",
},
errInfo: fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}),
},
{
name: "Valid configuration with no deprecated fields",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{},
ExtraEnabled: []PodProtection{},
},
},
errInfo: nil,
},
{
name: "Valid configuration: both Disabled and ExtraEnabled",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{
DaemonSetPods,
PodsWithLocalStorage,
},
ExtraEnabled: []PodProtection{
PodsWithPVC,
},
},
},
errInfo: nil,
},
{
name: "Valid configuration with ExtraEnabled",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{
PodsWithPVC,
},
},
},
errInfo: nil,
},
{
name: "Invalid configuration: Deprecated field used with Disabled",
args: &DefaultEvictorArgs{
EvictLocalStoragePods: true,
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{
DaemonSetPods,
},
},
},
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
},
{
name: "Invalid configuration: Deprecated field used with ExtraPodProtections",
args: &DefaultEvictorArgs{
EvictDaemonSetPods: true,
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{
PodsWithPVC,
},
},
},
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
},
{
name: "MinReplicas warning logged but no error",
args: &DefaultEvictorArgs{
MinReplicas: 1,
},
errInfo: nil,
},
{
name: "Invalid ExtraEnabled: Unknown policy",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{"InvalidPolicy"},
},
},
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "InvalidPolicy". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
},
{
name: "Invalid ExtraEnabled: Misspelled policy",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{"PodsWithPVCC"},
},
},
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "PodsWithPVCC". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
},
{
name: "Invalid ExtraEnabled: Policy from DefaultDisabled list",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{DaemonSetPods},
},
},
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "DaemonSetPods". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
},
{
name: "Invalid DefaultDisabled: Unknown policy",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{"InvalidPolicy"},
},
},
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "InvalidPolicy". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
},
{
name: "Invalid DefaultDisabled: Misspelled policy",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{"PodsWithLocalStorag"},
},
},
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithLocalStorag". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
},
{
name: "Invalid DefaultDisabled: Policy from ExtraEnabled list",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{PodsWithPVC},
},
},
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithPVC". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
},
{
name: "Invalid ExtraEnabled duplicate",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
},
},
errInfo: fmt.Errorf(`PodProtections.ExtraEnabled contains duplicate entries`),
},
{
name: "Invalid DefaultDisabled duplicate",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage},
},
},
errInfo: fmt.Errorf(`PodProtections.DefaultDisabled contains duplicate entries`),
},
{
name: "Invalid DefaultDisabled duplicate and Invalid ExtraEnabled duplicate and passing invalid no eviction policy",
args: &DefaultEvictorArgs{
NoEvictionPolicy: "invalid-no-eviction-policy",
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage, PodsWithoutPDB},
},
},
errInfo: fmt.Errorf(`[noEvictionPolicy accepts only ["Preferred" "Mandatory"] values, invalid pod protection policy in DefaultDisabled: "PodsWithoutPDB". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods], PodProtections.DefaultDisabled contains duplicate entries, PodProtections.ExtraEnabled contains duplicate entries]`),
},
{
name: "Protected storage classes without storage class name",
args: &DefaultEvictorArgs{
PodProtections: PodProtections{
ExtraEnabled: []PodProtection{PodsWithPVC},
Config: &PodProtectionsConfig{
PodsWithPVC: &PodsWithPVCConfig{
ProtectedStorageClasses: []ProtectedStorageClass{
{
Name: "",
},
{
Name: "protected-storage-class-0",
},
{
Name: "",
},
{
Name: "protected-storage-class-1",
},
},
},
},
},
},
errInfo: fmt.Errorf(`[PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[0] name cannot be empty, PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[2] name cannot be empty]`),
},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
validateErr := ValidateDefaultEvictorArgs(runtime.Object(testCase.args))
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,6 +46,7 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
*out = new(v1.Duration)
**out = **in
}
in.PodProtections.DeepCopyInto(&out.PodProtections)
return
}
@@ -66,3 +67,76 @@ func (in *DefaultEvictorArgs) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodProtections) DeepCopyInto(out *PodProtections) {
*out = *in
if in.ExtraEnabled != nil {
in, out := &in.ExtraEnabled, &out.ExtraEnabled
*out = make([]PodProtection, len(*in))
copy(*out, *in)
}
if in.DefaultDisabled != nil {
in, out := &in.DefaultDisabled, &out.DefaultDisabled
*out = make([]PodProtection, len(*in))
copy(*out, *in)
}
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(PodProtectionsConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtections.
func (in *PodProtections) DeepCopy() *PodProtections {
if in == nil {
return nil
}
out := new(PodProtections)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodProtectionsConfig) DeepCopyInto(out *PodProtectionsConfig) {
*out = *in
if in.PodsWithPVC != nil {
in, out := &in.PodsWithPVC, &out.PodsWithPVC
*out = new(PodsWithPVCConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtectionsConfig.
func (in *PodProtectionsConfig) DeepCopy() *PodProtectionsConfig {
if in == nil {
return nil
}
out := new(PodProtectionsConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsWithPVCConfig) DeepCopyInto(out *PodsWithPVCConfig) {
*out = *in
if in.ProtectedStorageClasses != nil {
in, out := &in.ProtectedStorageClasses, &out.ProtectedStorageClasses
*out = make([]ProtectedStorageClass, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsWithPVCConfig.
func (in *PodsWithPVCConfig) DeepCopy() *PodsWithPVCConfig {
if in == nil {
return nil
}
out := new(PodsWithPVCConfig)
in.DeepCopyInto(out)
return out
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -46,6 +46,7 @@ var _ fwtypes.DeschedulePlugin = &Example{}
// Example is our plugin (implementing the DeschedulePlugin interface). This
// plugin will evict pods that match a regex and are older than a certain age.
type Example struct {
logger klog.Logger
handle fwtypes.Handle
args *ExampleArgs
podFilter podutil.FilterFunc
@@ -55,12 +56,13 @@ type Example struct {
// a runtime.Object. Handle is used by plugins to retrieve a kubernetes client
// set, evictor interface, shared informer factory and other instruments shared
// across different plugins.
func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
// make sure we are receiving the right argument type.
exampleArgs, ok := args.(*ExampleArgs)
if !ok {
return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
// we can use the included and excluded namespaces to filter the pods we want
// to evict.
@@ -90,6 +92,7 @@ func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
}
return &Example{
logger: logger,
handle: handle,
podFilter: podFilter,
args: exampleArgs,
@@ -107,7 +110,7 @@ func (d *Example) Name() string {
// of nodes we need to process.
func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
var podsToEvict []*v1.Pod
logger := klog.FromContext(ctx)
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", fwtypes.DescheduleExtensionPoint)
logger.Info("Example plugin starting descheduling")
re, err := regexp.Compile(d.args.Regex)
@@ -137,10 +140,23 @@ func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Sta
// go node by node getting all pods that we can evict.
for _, node := range nodes {
// ListAllPodsOnANode is a helper function that retrieves all
// pods filtering out the ones we can't evict. We merge the
// default filters with the one we created above.
pods, err := podutil.ListAllPodsOnANode(
// ListAllPodsOnANode is a helper function that retrieves all pods filtering out the ones we can't evict.
// ListPodsOnANode is a helper function that retrieves all pods (excluding Succeeded or Failed phases) filtering out the ones we can't evict.
// We merge the default filters with the one we created above.
//
// The difference between ListPodsOnANode and ListAllPodsOnANode lies in their handling of Pods based on their phase:
// - ListPodsOnANode excludes Pods that are in Succeeded or Failed phases because they do not occupy any resources.
// - ListAllPodsOnANode does not exclude Pods based on their phase, listing all Pods regardless of their state.
//
// In this context, we prefer using ListPodsOnANode because:
// 1. It ensures that only active Pods (not in Succeeded or Failed states) are considered for eviction.
// 2. This helps avoid unnecessary processing of Pods that no longer consume resources.
// 3. By applying an additional filter (d.podFilter and filter), we can further refine which Pods are eligible for eviction,
// ensuring that only Pods meeting specific criteria are selected.
//
// However, if you need to consider all Pods including those in Succeeded or Failed states for other purposes,
// you should use ListAllPodsOnANode instead.
pods, err := podutil.ListPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, filter),

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -44,6 +44,7 @@ var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
// can schedule according to its plugin. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.
type HighNodeUtilization struct {
logger klog.Logger
handle frameworktypes.Handle
args *HighNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
@@ -55,7 +56,7 @@ type HighNodeUtilization struct {
// NewHighNodeUtilization builds plugin from its arguments while passing a handle.
func NewHighNodeUtilization(
genericArgs runtime.Object, handle frameworktypes.Handle,
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
) (frameworktypes.Plugin, error) {
args, ok := genericArgs.(*HighNodeUtilizationArgs)
if !ok {
@@ -64,6 +65,7 @@ func NewHighNodeUtilization(
genericArgs,
)
}
logger := klog.FromContext(ctx).WithValues("plugin", HighNodeUtilizationPluginName)
// this plugins worries only about thresholds but the nodeplugins
// package was made to take two thresholds into account, one for low
@@ -113,6 +115,7 @@ func NewHighNodeUtilization(
)
return &HighNodeUtilization{
logger: logger,
handle: handle,
args: args,
resourceNames: resourceNames,
@@ -135,6 +138,8 @@ func (h *HighNodeUtilization) Name() string {
// utilized nodes. The goal here is to concentrate pods in fewer nodes so that
// less nodes are used.
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, h.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
if err := h.usageClient.sync(ctx, nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
@@ -165,7 +170,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
// schedulable nodes.
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
klog.V(2).InfoS(
logger.V(2).Info(
"Node is unschedulable",
"node", klog.KObj(nodesMap[nodeName]),
)
@@ -184,7 +189,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
category := []string{"underutilized", "overutilized"}
for i := range nodeGroups {
for nodeName := range nodeGroups[i] {
klog.InfoS(
logger.Info(
"Node has been classified",
"category", category[i],
"node", klog.KObj(nodesMap[nodeName]),
@@ -208,18 +213,18 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]
klog.V(1).InfoS("Criteria for a node below target utilization", h.criteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
logger.V(1).Info("Criteria for a node below target utilization", h.criteria...)
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
if len(lowNodes) == 0 {
klog.V(1).InfoS(
logger.V(1).Info(
"No node is underutilized, nothing to do here, you might tune your thresholds further",
)
return nil
}
if len(lowNodes) <= h.args.NumberOfNodes {
klog.V(1).InfoS(
logger.V(1).Info(
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
"underutilizedNodes", len(lowNodes),
"numberOfNodes", h.args.NumberOfNodes,
@@ -228,12 +233,12 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
}
if len(lowNodes) == len(nodes) {
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
logger.V(1).Info("All nodes are underutilized, nothing to do here")
return nil
}
if len(schedulableNodes) == 0 {
klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
logger.V(1).Info("No node is available to schedule the pods, nothing to do here")
return nil
}

View File

@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
@@ -97,17 +96,7 @@ func TestHighNodeUtilization(t *testing.T) {
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -115,8 +104,7 @@ func TestHighNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
@@ -168,8 +156,7 @@ func TestHighNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
@@ -249,9 +236,7 @@ func TestHighNodeUtilization(t *testing.T) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
@@ -466,9 +451,7 @@ func TestHighNodeUtilization(t *testing.T) {
// pods in the other nodes must not be evicted
// because they do not have the extended
// resource defined in their requests.
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
@@ -523,6 +506,7 @@ func TestHighNodeUtilization(t *testing.T) {
}
plugin, err := NewHighNodeUtilization(
ctx,
&HighNodeUtilizationArgs{
Thresholds: testCase.thresholds,
EvictionModes: testCase.evictionModes,
@@ -637,7 +621,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
plugin, err := NewHighNodeUtilization(ctx, &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 40,
},

View File

@@ -43,6 +43,7 @@ var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
// nodes. Note that CPU/Memory requests are used to calculate nodes'
// utilization and not the actual resource usage.
type LowNodeUtilization struct {
logger klog.Logger
handle frameworktypes.Handle
args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
@@ -57,7 +58,7 @@ type LowNodeUtilization struct {
// handle. this plugin aims to move workload from overutilized nodes to
// underutilized nodes.
func NewLowNodeUtilization(
genericArgs runtime.Object, handle frameworktypes.Handle,
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
) (frameworktypes.Plugin, error) {
args, ok := genericArgs.(*LowNodeUtilizationArgs)
if !ok {
@@ -66,6 +67,7 @@ func NewLowNodeUtilization(
genericArgs,
)
}
logger := klog.FromContext(ctx).WithValues("plugin", LowNodeUtilizationPluginName)
// resourceNames holds a list of resources for which the user has
// provided thresholds for. extendedResourceNames holds those as well
@@ -115,6 +117,7 @@ func NewLowNodeUtilization(
}
return &LowNodeUtilization{
logger: logger,
handle: handle,
args: args,
underCriteria: thresholdsToKeysAndValues(args.Thresholds),
@@ -135,6 +138,8 @@ func (l *LowNodeUtilization) Name() string {
// utilized nodes to under utilized nodes. The goal here is to evenly
// distribute pods across nodes.
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, l.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
if err := l.usageClient.sync(ctx, nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
@@ -182,7 +187,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
// underutilized but aren't schedulable are ignored.
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
klog.V(2).InfoS(
logger.V(2).Info(
"Node is unschedulable, thus not considered as underutilized",
"node", klog.KObj(nodesMap[nodeName]),
)
@@ -207,7 +212,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
for nodeName := range nodeGroups[i] {
classifiedNodes[nodeName] = true
klog.InfoS(
logger.Info(
"Node has been classified",
"category", categories[i],
"node", klog.KObj(nodesMap[nodeName]),
@@ -233,7 +238,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
// log nodes that are appropriately utilized.
for nodeName := range nodesMap {
if !classifiedNodes[nodeName] {
klog.InfoS(
logger.Info(
"Node is appropriately utilized",
"node", klog.KObj(nodesMap[nodeName]),
"usage", nodesUsageMap[nodeName],
@@ -245,20 +250,20 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
lowNodes, highNodes := nodeInfos[0], nodeInfos[1]
// log messages for nodes with low and high utilization
klog.V(1).InfoS("Criteria for a node under utilization", l.underCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
klog.V(1).InfoS("Criteria for a node above target utilization", l.overCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(highNodes))
logger.V(1).Info("Criteria for a node under utilization", l.underCriteria...)
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
logger.V(1).Info("Criteria for a node above target utilization", l.overCriteria...)
logger.V(1).Info("Number of overutilized nodes", "totalNumber", len(highNodes))
if len(lowNodes) == 0 {
klog.V(1).InfoS(
logger.V(1).Info(
"No node is underutilized, nothing to do here, you might tune your thresholds further",
)
return nil
}
if len(lowNodes) <= l.args.NumberOfNodes {
klog.V(1).InfoS(
logger.V(1).Info(
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
"underutilizedNodes", len(lowNodes),
"numberOfNodes", l.args.NumberOfNodes,
@@ -267,12 +272,12 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
}
if len(lowNodes) == len(nodes) {
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
logger.V(1).Info("All nodes are underutilized, nothing to do here")
return nil
}
if len(highNodes) == 0 {
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
logger.V(1).Info("All nodes are under target utilization, nothing to do here")
return nil
}

View File

@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
@@ -92,25 +91,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -155,25 +143,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -233,25 +210,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -310,17 +276,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -328,8 +284,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -379,17 +334,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -397,8 +342,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -462,17 +406,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -480,8 +414,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -525,9 +458,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
@@ -537,23 +468,11 @@ func TestLowNodeUtilization(t *testing.T) {
test.MakeBestEffortPod(pod)
}),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
}),
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -561,8 +480,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -632,17 +550,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -651,8 +559,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetNormalOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
@@ -747,12 +654,8 @@ func TestLowNodeUtilization(t *testing.T) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
}),
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p3", 0, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p8", 0, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
@@ -795,17 +698,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -813,8 +706,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
@@ -872,17 +764,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -890,8 +772,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
},
nodemetricses: []*v1beta1.NodeMetrics{
@@ -975,17 +856,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -993,8 +864,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1037,17 +907,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -1055,8 +915,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1106,17 +965,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -1124,8 +973,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
},
@@ -1218,17 +1066,7 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
@@ -1236,8 +1074,7 @@ func TestLowNodeUtilization(t *testing.T) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1282,25 +1119,14 @@ func TestLowNodeUtilization(t *testing.T) {
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
@@ -1391,7 +1217,7 @@ func TestLowNodeUtilization(t *testing.T) {
metricsUtilization = &MetricsUtilization{Source: api.KubernetesMetrics}
}
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
plugin, err := NewLowNodeUtilization(ctx, &LowNodeUtilizationArgs{
Thresholds: tc.thresholds,
TargetThresholds: tc.targetThresholds,
UseDeviationThresholds: tc.useDeviationThresholds,
@@ -1551,7 +1377,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
plugin, err := NewLowNodeUtilization(ctx, &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourcePods: 20,
},
@@ -1575,17 +1401,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
func withLocalStorage(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
test.SetHostPathEmptyDirVolumeSource(pod)
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}
@@ -1594,8 +1410,7 @@ func withCriticalPod(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}
func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
@@ -1824,7 +1639,7 @@ func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
result: tc.samples,
dataType: model.ValVector,
}
plugin, err := NewLowNodeUtilization(tc.args, handle)
plugin, err := NewLowNodeUtilization(ctx, tc.args, handle)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

View File

@@ -176,13 +176,14 @@ func evictPodsFromSourceNodes(
usageClient usageClient,
maxNoOfPodsToEvictPerNode *uint,
) {
logger := klog.FromContext(ctx)
available, err := assessAvailableResourceInNodes(destinationNodes, resourceNames)
if err != nil {
klog.ErrorS(err, "unable to assess available resources in nodes")
logger.Error(err, "unable to assess available resources in nodes")
return
}
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(available)...)
logger.V(1).Info("Total capacity to be moved", usageToKeysAndValues(available)...)
destinationTaints := make(map[string][]v1.Taint, len(destinationNodes))
for _, node := range destinationNodes {
@@ -190,14 +191,14 @@ func evictPodsFromSourceNodes(
}
for _, node := range sourceNodes {
klog.V(3).InfoS(
logger.V(3).Info(
"Evicting pods from node",
"node", klog.KObj(node.node),
"usage", node.usage,
)
nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
klog.V(2).InfoS(
logger.V(2).Info(
"Pods on node",
"node", klog.KObj(node.node),
"allPods", len(node.allPods),
@@ -206,14 +207,14 @@ func evictPodsFromSourceNodes(
)
if len(removablePods) == 0 {
klog.V(1).InfoS(
logger.V(1).Info(
"No removable pods on node, try next node",
"node", klog.KObj(node.node),
)
continue
}
klog.V(1).InfoS(
logger.V(1).Info(
"Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers",
)
@@ -260,6 +261,7 @@ func evictPods(
usageClient usageClient,
maxNoOfPodsToEvictPerNode *uint,
) error {
logger := klog.FromContext(ctx)
// preemptive check to see if we should continue evicting pods.
if !continueEviction(nodeInfo, totalAvailableUsage) {
return nil
@@ -274,7 +276,7 @@ func evictPods(
var evictionCounter uint = 0
for _, pod := range inputPods {
if maxNoOfPodsToEvictPerNode != nil && evictionCounter >= *maxNoOfPodsToEvictPerNode {
klog.V(3).InfoS(
logger.V(3).Info(
"Max number of evictions per node per plugin reached",
"limit", *maxNoOfPodsToEvictPerNode,
)
@@ -282,7 +284,7 @@ func evictPods(
}
if !utils.PodToleratesTaints(pod, destinationTaints) {
klog.V(3).InfoS(
logger.V(3).Info(
"Skipping eviction for pod, doesn't tolerate node taint",
"pod", klog.KObj(pod),
)
@@ -297,7 +299,7 @@ func evictPods(
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "could not build preEvictionFilter with namespace exclusion")
logger.Error(err, "could not build preEvictionFilter with namespace exclusion")
continue
}
@@ -311,9 +313,8 @@ func evictPods(
podUsage, err := usageClient.podUsage(pod)
if err != nil {
if _, ok := err.(*notSupportedError); !ok {
klog.Errorf(
"unable to get pod usage for %v/%v: %v",
pod.Namespace, pod.Name, err,
logger.Error(err,
"unable to get pod usage", "pod", klog.KObj(pod),
)
continue
}
@@ -325,18 +326,18 @@ func evictPods(
case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
return err
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
continue
}
}
if maxNoOfPodsToEvictPerNode == nil && unconstrainedResourceEviction {
klog.V(3).InfoS("Currently, only a single pod eviction is allowed")
logger.V(3).Info("Currently, only a single pod eviction is allowed")
break
}
evictionCounter++
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
logger.V(3).Info("Evicted pods", "pod", klog.KObj(pod))
if unconstrainedResourceEviction {
continue
}
@@ -345,7 +346,7 @@ func evictPods(
keysAndValues := []any{"node", nodeInfo.node.Name}
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
klog.V(3).InfoS("Updated node usage", keysAndValues...)
logger.V(3).Info("Updated node usage", keysAndValues...)
// make sure we should continue evicting pods.
if !continueEviction(nodeInfo, totalAvailableUsage) {

View File

@@ -264,12 +264,13 @@ func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]
}
func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
logger := klog.FromContext(ctx)
results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
if err != nil {
return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
}
if len(warnings) > 0 {
klog.Infof("prometheus metrics warnings: %v", warnings)
logger.Info("prometheus metrics warnings: %v", warnings)
}
if results.Type() != model.ValVector {

View File

@@ -257,3 +257,138 @@ func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
})
}
}
func TestValidateHighNodeUtilizationPluginConfig(t *testing.T) {
extendedResource := v1.ResourceName("example.com/foo")
tests := []struct {
name string
args *HighNodeUtilizationArgs
wantErr bool
errMsg string
}{
{
name: "valid configuration with CPU and memory",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 90,
},
EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
},
wantErr: false,
},
{
name: "valid configuration with extended resource",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 85,
extendedResource: 95,
},
EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
},
wantErr: false,
},
{
name: "empty thresholds",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{},
},
wantErr: true,
errMsg: "no resource threshold is configured",
},
{
name: "threshold below minimum (0%)",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: -1,
},
},
wantErr: true,
errMsg: "cpu threshold not in [0, 100] range",
},
{
name: "threshold above maximum (100%)",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceMemory: 101,
},
},
wantErr: true,
errMsg: "memory threshold not in [0, 100] range",
},
{
name: "multiple thresholds with one out of range",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourceMemory: 150,
},
},
wantErr: true,
errMsg: "memory threshold not in [0, 100] range",
},
{
name: "evictableNamespaces with Exclude (allowed)",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
},
EvictableNamespaces: &api.Namespaces{
Exclude: []string{"ns1", "ns2"},
},
},
wantErr: false,
},
{
name: "invalid eviction mode",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
},
EvictionModes: []EvictionMode{"InvalidMode"},
},
wantErr: true,
errMsg: "invalid eviction mode InvalidMode",
},
{
name: "missing eviction modes (nil) - should be allowed (treated as empty)",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
},
EvictionModes: nil,
},
wantErr: false,
},
{
name: "empty eviction modes slice - should be allowed",
args: &HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
},
EvictionModes: []EvictionMode{},
},
wantErr: false,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
err := ValidateHighNodeUtilizationArgs(runtime.Object(tc.args))
if tc.wantErr {
if err == nil {
t.Fatalf("expected error, but got nil")
}
if tc.errMsg != "" && err.Error() != tc.errMsg {
t.Errorf("expected error message: %q, but got: %q", tc.errMsg, err.Error())
}
} else {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
})
}
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -0,0 +1,155 @@
# PodLifeTime Plugin
## What It Does
The PodLifeTime plugin evicts pods that have been running for too long. You can configure a maximum age threshold, and the plugin evicts pods older than that threshold. The oldest pods are evicted first.
## How It Works
The plugin examines all pods across your nodes and selects those that exceed the configured age threshold. You can further narrow down which pods are considered by specifying:
- Which namespaces to include or exclude
- Which labels pods must have
- Which states pods must be in (e.g., Running, Pending, CrashLoopBackOff)
Once pods are selected, they are sorted by age (oldest first) and evicted in that order. Eviction stops when limits are reached (per-node limits, total limits, or Pod Disruption Budget constraints).
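To make the selection and ordering concrete, below is a minimal, illustrative Go sketch. The `podInfo` type and `selectOldestFirst` helper are hypothetical stand-ins, not the plugin's actual code, which works with `*v1.Pod` and the descheduler's eviction framework:
```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// podInfo is a hypothetical, simplified stand-in for a pod.
type podInfo struct {
	Name      string
	StartTime time.Time
}

// selectOldestFirst returns the pods older than maxAge, ordered oldest first,
// mirroring the selection and ordering described above.
func selectOldestFirst(pods []podInfo, maxAge time.Duration, now time.Time) []podInfo {
	candidates := []podInfo{}
	for _, p := range pods {
		if now.Sub(p.StartTime) > maxAge {
			candidates = append(candidates, p)
		}
	}
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].StartTime.Before(candidates[j].StartTime) // oldest first
	})
	return candidates
}

func main() {
	now := time.Now()
	pods := []podInfo{
		{Name: "web-1", StartTime: now.Add(-36 * time.Hour)},
		{Name: "web-2", StartTime: now.Add(-2 * time.Hour)},
		{Name: "batch-1", StartTime: now.Add(-72 * time.Hour)},
	}
	// With a 1-day threshold, only batch-1 and web-1 qualify,
	// and batch-1 (the oldest) comes first.
	for _, p := range selectOldestFirst(pods, 24*time.Hour, now) {
		fmt.Println(p.Name)
	}
}
```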
## Use Cases
- **Resource Leakage Mitigation**: Restart long-running pods that may have accumulated memory leaks, stale cache, or resource leaks
```yaml
args:
maxPodLifeTimeSeconds: 604800 # 7 days
states: [Running]
```
- **Ephemeral Workload Cleanup**: Remove long-running batch jobs, test pods, or temporary workloads that have exceeded their expected lifetime
```yaml
args:
maxPodLifeTimeSeconds: 7200 # 2 hours
states: [Succeeded, Failed]
```
- **Node Hygiene**: Remove forgotten or stuck pods that are consuming resources but not making progress
```yaml
args:
maxPodLifeTimeSeconds: 3600 # 1 hour
states: [CrashLoopBackOff, ImagePullBackOff, ErrImagePull]
includingInitContainers: true
```
- **Config/Secret Update Pickup**: Force pod restart to pick up updated ConfigMaps, Secrets, or environment variables
```yaml
args:
maxPodLifeTimeSeconds: 86400 # 1 day
states: [Running]
labelSelector:
matchLabels:
config-refresh: enabled
```
- **Security Rotation**: Periodically refresh pods to pick up new security tokens, certificates, or patched container images
```yaml
args:
maxPodLifeTimeSeconds: 259200 # 3 days
states: [Running]
namespaces:
exclude: [kube-system]
```
- **Dev/Test Environment Cleanup**: Automatically clean up old pods in development or staging namespaces
```yaml
args:
maxPodLifeTimeSeconds: 86400 # 1 day
namespaces:
include: [dev, staging, test]
```
- **Cluster Health Freshness**: Ensure pods periodically restart to maintain cluster health and verify workloads can recover from restarts
```yaml
args:
maxPodLifeTimeSeconds: 604800 # 7 days
states: [Running]
namespaces:
exclude: [kube-system, production]
```
- **Rebalancing Assistance**: Work alongside other descheduler strategies by removing old pods to allow better pod distribution
```yaml
args:
maxPodLifeTimeSeconds: 1209600 # 14 days
states: [Running]
```
- **Non-Critical Stateful Refresh**: Occasionally reset stateful workloads that can tolerate restarts, such as those that can handle data loss or have external backup mechanisms
```yaml
args:
maxPodLifeTimeSeconds: 2592000 # 30 days
labelSelector:
matchLabels:
stateful-tier: cache
```
## Configuration
| Parameter | Description | Type | Required | Default |
|-----------|-------------|------|----------|---------|
| `maxPodLifeTimeSeconds` | Pods older than this many seconds are evicted | `uint` | Yes | - |
| `namespaces` | Limit eviction to specific namespaces (or exclude specific namespaces) | `Namespaces` | No | `nil` |
| `labelSelector` | Only evict pods matching these labels | `metav1.LabelSelector` | No | `nil` |
| `states` | Only evict pods in specific states (e.g., Running, CrashLoopBackOff) | `[]string` | No | `nil` |
| `includingInitContainers` | When checking states, also check init container states | `bool` | No | `false` |
| `includingEphemeralContainers` | When checking states, also check ephemeral container states | `bool` | No | `false` |
### Discovering states
Each pod is checked in the following locations to discover its relevant states:
1. **Pod Phase** - The overall pod lifecycle phase:
- `Running` - Pod is running on a node
- `Pending` - Pod has been accepted but containers are not yet running
- `Succeeded` - All containers terminated successfully
- `Failed` - All containers terminated, at least one failed
- `Unknown` - Pod state cannot be determined
2. **Pod Status Reason** - Why the pod is in its current state:
- `NodeAffinity` - Pod cannot be scheduled due to node affinity rules
- `NodeLost` - Node hosting the pod is lost
- `Shutdown` - Pod terminated due to node shutdown
- `UnexpectedAdmissionError` - Pod admission failed unexpectedly
3. **Container Waiting Reason** - Why containers are waiting to start:
- `PodInitializing` - Pod is still initializing
- `ContainerCreating` - Container is being created
- `ImagePullBackOff` - Image pull is failing and backing off
- `CrashLoopBackOff` - Container is crashing repeatedly
- `CreateContainerConfigError` - Container configuration is invalid
- `ErrImagePull` - Image cannot be pulled
- `CreateContainerError` - Container creation failed
- `InvalidImageName` - Image name is invalid
By default, only regular containers are checked. Enable `includingInitContainers` or `includingEphemeralContainers` to also check those container types.
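For illustration, the sketch below shows one way such state discovery could look in Go. `podStates` is a hypothetical helper written against `k8s.io/api/core/v1`, not the plugin's actual implementation:
```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// podStates gathers the values the three locations above can contribute:
// the pod phase, the pod status reason, and container waiting reasons.
func podStates(pod *v1.Pod, includeInit, includeEphemeral bool) []string {
	states := []string{string(pod.Status.Phase)}
	if pod.Status.Reason != "" {
		states = append(states, pod.Status.Reason)
	}
	collect := func(statuses []v1.ContainerStatus) {
		for _, cs := range statuses {
			if cs.State.Waiting != nil && cs.State.Waiting.Reason != "" {
				states = append(states, cs.State.Waiting.Reason)
			}
		}
	}
	collect(pod.Status.ContainerStatuses)
	if includeInit {
		collect(pod.Status.InitContainerStatuses)
	}
	if includeEphemeral {
		collect(pod.Status.EphemeralContainerStatuses)
	}
	return states
}

func main() {
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Phase: v1.PodPending,
			InitContainerStatuses: []v1.ContainerStatus{
				{State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"}}},
			},
		},
	}
	// Without init containers the pod only matches "Pending";
	// with includingInitContainers it also matches "CrashLoopBackOff".
	fmt.Println(podStates(pod, false, false))
	fmt.Println(podStates(pod, true, false))
}
```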
## Example
```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
- name: default
plugins:
deschedule:
enabled:
- name: PodLifeTime
pluginConfig:
- name: PodLifeTime
args:
maxPodLifeTimeSeconds: 86400 # 1 day
namespaces:
include:
- default
states:
- Running
```
This configuration evicts Running pods in the `default` namespace that are older than 1 day.

View File

@@ -38,17 +38,19 @@ var _ frameworktypes.DeschedulePlugin = &PodLifeTime{}
// PodLifeTime evicts pods on the node that violate the max pod lifetime threshold
type PodLifeTime struct {
logger klog.Logger
handle frameworktypes.Handle
args *PodLifeTimeArgs
podFilter podutil.FilterFunc
}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
podLifeTimeArgs, ok := args.(*PodLifeTimeArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
var includedNamespaces, excludedNamespaces sets.Set[string]
if podLifeTimeArgs.Namespaces != nil {
@@ -115,6 +117,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}
return &PodLifeTime{
logger: logger,
handle: handle,
podFilter: podFilter,
args: podLifeTimeArgs,
@@ -130,9 +133,9 @@ func (d *PodLifeTime) Name() string {
func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
podsToEvict := make([]*v1.Pod, 0)
nodeMap := make(map[string]*v1.Node, len(nodes))
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -161,7 +164,7 @@ loop:
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}

File diff suppressed because it is too large

View File

@@ -18,35 +18,39 @@ package podlifetime
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"sort"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidatePodLifeTimeArgs validates PodLifeTime arguments
func ValidatePodLifeTimeArgs(obj runtime.Object) error {
args := obj.(*PodLifeTimeArgs)
var allErrs []error
if args.MaxPodLifeTimeSeconds == nil {
return fmt.Errorf("MaxPodLifeTimeSeconds not set")
allErrs = append(allErrs, fmt.Errorf("MaxPodLifeTimeSeconds not set"))
}
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}
if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}
podLifeTimeAllowedStates := sets.New(
// Pod Status Phase
string(v1.PodRunning),
string(v1.PodPending),
string(v1.PodSucceeded),
string(v1.PodFailed),
string(v1.PodUnknown),
// Pod Status Reasons
@@ -70,8 +74,10 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
)
if !podLifeTimeAllowedStates.HasAll(args.States...) {
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.UnsortedList())
allowed := podLifeTimeAllowedStates.UnsortedList()
sort.Strings(allowed)
allErrs = append(allErrs, fmt.Errorf("states must be one of %v", allowed))
}
return nil
return utilerrors.NewAggregate(allErrs)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package podlifetime
import (
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
@@ -26,7 +27,7 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
testCases := []struct {
description string
args *PodLifeTimeArgs
expectError bool
errInfo error
}{
{
description: "valid arg, no errors",
@@ -34,7 +35,13 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
States: []string{string(v1.PodRunning)},
},
expectError: false,
},
{
description: "Pod Status Reasons Succeeded or Failed",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
States: []string{string(v1.PodSucceeded), string(v1.PodFailed)},
},
},
{
description: "Pod Status Reasons CrashLoopBackOff ",
@@ -42,31 +49,41 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
States: []string{"CrashLoopBackOff"},
},
expectError: false,
},
{
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: nil,
},
expectError: true,
errInfo: fmt.Errorf("MaxPodLifeTimeSeconds not set"),
},
{
description: "invalid pod state arg, expects errors",
args: &PodLifeTimeArgs{
States: []string{string(v1.NodeRunning)},
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
States: []string{string("InvalidState")},
},
expectError: true,
errInfo: fmt.Errorf("states must be one of [ContainerCreating CrashLoopBackOff CreateContainerConfigError CreateContainerError ErrImagePull Failed ImagePullBackOff InvalidImageName NodeAffinity NodeLost Pending PodInitializing Running Shutdown Succeeded UnexpectedAdmissionError Unknown]"),
},
{
description: "nil MaxPodLifeTimeSeconds arg and invalid pod state arg, expects errors",
args: &PodLifeTimeArgs{
MaxPodLifeTimeSeconds: nil,
States: []string{string("InvalidState")},
},
errInfo: fmt.Errorf("[MaxPodLifeTimeSeconds not set, states must be one of [ContainerCreating CrashLoopBackOff CreateContainerConfigError CreateContainerError ErrImagePull Failed ImagePullBackOff InvalidImageName NodeAffinity NodeLost Pending PodInitializing Running Shutdown Succeeded UnexpectedAdmissionError Unknown]]"),
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidatePodLifeTimeArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidatePodLifeTimeArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -45,6 +45,7 @@ const PluginName = "RemoveDuplicates"
// As of now, this plugin won't evict daemonsets, mirror pods, critical pods and pods with local storages.
type RemoveDuplicates struct {
logger klog.Logger
handle frameworktypes.Handle
args *RemoveDuplicatesArgs
podFilter podutil.FilterFunc
@@ -62,11 +63,12 @@ func (po podOwner) String() string {
}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
var includedNamespaces, excludedNamespaces sets.Set[string]
if removeDuplicatesArgs.Namespaces != nil {
@@ -85,6 +87,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}
return &RemoveDuplicates{
logger: logger,
handle: handle,
args: removeDuplicatesArgs,
podFilter: podFilter,
@@ -102,12 +105,13 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
ownerKeyOccurence := make(map[podOwner]int32)
nodeCount := 0
nodeMap := make(map[string]*v1.Node)
logger := klog.FromContext(klog.NewContext(ctx, r.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
if err != nil {
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
logger.Error(err, "Error listing evictable pods on node", "node", klog.KObj(node))
continue
}
nodeMap[node.Name] = node
@@ -163,7 +167,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
for _, keys := range existing {
if reflect.DeepEqual(keys, podContainerKeys) {
matched = true
klog.V(3).InfoS("Duplicate found", "pod", klog.KObj(pod))
logger.V(3).Info("Duplicate found", "pod", klog.KObj(pod))
for _, ownerRef := range ownerRefList {
ownerKey := podOwner{
namespace: pod.ObjectMeta.Namespace,
@@ -195,16 +199,16 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
targetNodes := getTargetNodes(podNodes, nodes)
klog.V(2).InfoS("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
logger.V(2).Info("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
if len(targetNodes) < 2 {
klog.V(1).InfoS("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
logger.V(1).Info("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
continue
}
upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(len(targetNodes))))
loop:
for nodeName, pods := range podNodes {
klog.V(2).InfoS("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
logger.V(2).Info("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
// list of duplicated pods does not contain the original referential pod
if len(pods)+1 > upperAvg {
// It's assumed all duplicated pods are in the same priority class
@@ -220,7 +224,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}
}

View File

@@ -17,14 +17,16 @@ import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
func ValidateRemoveDuplicatesArgs(obj runtime.Object) error {
args := obj.(*RemoveDuplicatesArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}
return nil
return utilerrors.NewAggregate(allErrs)
}

View File

@@ -1,6 +1,7 @@
package removeduplicates
import (
"fmt"
"testing"
"sigs.k8s.io/descheduler/pkg/api"
@@ -11,6 +12,7 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
description string
args *RemoveDuplicatesArgs
expectError bool
errInfo error
}{
{
description: "valid namespace args, no errors",
@@ -20,7 +22,6 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
Include: []string{"default"},
},
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
@@ -31,17 +32,19 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
Exclude: []string{"kube-system"},
},
},
expectError: true,
errInfo: fmt.Errorf("only one of Include/Exclude namespaces can be set"),
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemoveDuplicatesArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemoveDuplicatesArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -36,6 +36,7 @@ const PluginName = "RemoveFailedPods"
// RemoveFailedPods evicts pods in failed status phase that match the given args criteria
type RemoveFailedPods struct {
logger klog.Logger
handle frameworktypes.Handle
args *RemoveFailedPodsArgs
podFilter podutil.FilterFunc
@@ -44,11 +45,12 @@ type RemoveFailedPods struct {
var _ frameworktypes.DeschedulePlugin = &RemoveFailedPods{}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
failedPodsArgs, ok := args.(*RemoveFailedPodsArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
var includedNamespaces, excludedNamespaces sets.Set[string]
if failedPodsArgs.Namespaces != nil {
@@ -71,7 +73,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
if err := validateCanEvict(pod, failedPodsArgs); err != nil {
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
logger.V(4).Info(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
return false
}
@@ -79,6 +81,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
})
return &RemoveFailedPods{
logger: logger,
handle: handle,
podFilter: podFilter,
args: failedPodsArgs,
@@ -92,8 +95,9 @@ func (d *RemoveFailedPods) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -114,7 +118,7 @@ func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *fr
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}
}

View File

@@ -362,7 +362,7 @@ func TestRemoveFailedPods(t *testing.T) {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
plugin, err := New(&RemoveFailedPodsArgs{
plugin, err := New(ctx, &RemoveFailedPodsArgs{
Reasons: tc.args.Reasons,
ExitCodes: tc.args.ExitCodes,
MinPodLifetimeSeconds: tc.args.MinPodLifetimeSeconds,

View File

@@ -18,21 +18,23 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
// ValidateRemoveFailedPodsArgs validates RemoveFailedPods arguments
func ValidateRemoveFailedPodsArgs(obj runtime.Object) error {
args := obj.(*RemoveFailedPodsArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}
if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}
return nil
return utilerrors.NewAggregate(allErrs)
}

View File

@@ -1,6 +1,7 @@
package removefailedpods
import (
"fmt"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -12,7 +13,7 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
testCases := []struct {
description string
args *RemoveFailedPodsArgs
expectError bool
errInfo error
}{
{
description: "valid namespace args, no errors",
@@ -24,7 +25,6 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
Reasons: []string{"ReasonDoesNotMatch"},
MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds,
},
expectError: false,
},
{
description: "invalid namespaces args, expects error",
@@ -34,7 +34,7 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
Exclude: []string{"kube-system"},
},
},
expectError: true,
errInfo: fmt.Errorf(`only one of Include/Exclude namespaces can be set`),
},
{
description: "valid label selector args, no errors",
@@ -43,7 +43,6 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
expectError: false,
},
{
description: "invalid label selector args, expects errors",
@@ -56,16 +55,19 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
},
},
},
expectError: true,
errInfo: fmt.Errorf(`failed to get label selectors from strategy's params: [key: Invalid value: "": name part must be non-empty; name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), values: Invalid value: null: for 'in', 'notin' operators, values set can't be empty]`),
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
err := ValidateRemoveFailedPodsArgs(tc.args)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected arg validation behavior")
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemoveFailedPodsArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -19,6 +19,7 @@ package removepodshavingtoomanyrestarts
import (
"context"
"fmt"
"sort"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -36,6 +37,7 @@ const PluginName = "RemovePodsHavingTooManyRestarts"
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
type RemovePodsHavingTooManyRestarts struct {
logger klog.Logger
handle frameworktypes.Handle
args *RemovePodsHavingTooManyRestartsArgs
podFilter podutil.FilterFunc
@@ -44,11 +46,12 @@ type RemovePodsHavingTooManyRestarts struct {
var _ frameworktypes.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
// New builds plugin from its arguments while passing a handle
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
tooManyRestartsArgs, ok := args.(*RemovePodsHavingTooManyRestartsArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
var includedNamespaces, excludedNamespaces sets.Set[string]
if tooManyRestartsArgs.Namespaces != nil {
@@ -69,7 +72,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
if err := validateCanEvict(pod, tooManyRestartsArgs); err != nil {
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
logger.V(4).Info(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
return false
}
return true
@@ -99,6 +102,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}
return &RemovePodsHavingTooManyRestarts{
logger: logger,
handle: handle,
args: tooManyRestartsArgs,
podFilter: podFilter,
@@ -112,8 +116,9 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
// Deschedule extension point implementation for the plugin
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
// no pods evicted as error encountered retrieving evictable Pods
@@ -121,6 +126,15 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
Err: fmt.Errorf("error listing pods on a node: %v", err),
}
}
podRestarts := make(map[*v1.Pod]int32)
for _, pod := range pods {
podRestarts[pod] = getPodTotalRestarts(pod, d.args.IncludingInitContainers)
}
// sort pods by restarts count
sort.Slice(pods, func(i, j int) bool {
return podRestarts[pods[i]] > podRestarts[pods[j]]
})
totalPods := len(pods)
loop:
for i := 0; i < totalPods; i++ {
@@ -134,7 +148,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}
}
@@ -145,11 +159,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
func validateCanEvict(pod *v1.Pod, tooManyRestartsArgs *RemovePodsHavingTooManyRestartsArgs) error {
var err error
restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
if tooManyRestartsArgs.IncludingInitContainers {
restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
}
restarts := getPodTotalRestarts(pod, tooManyRestartsArgs.IncludingInitContainers)
if restarts < tooManyRestartsArgs.PodRestartThreshold {
err = fmt.Errorf("number of container restarts (%v) not exceeding the threshold", restarts)
}
@@ -165,3 +175,12 @@ func calcContainerRestartsFromStatuses(statuses []v1.ContainerStatus) int32 {
}
return restarts
}
// getPodTotalRestarts get total restarts of a pod.
func getPodTotalRestarts(pod *v1.Pod, includeInitContainers bool) int32 {
restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
if includeInitContainers {
restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
}
return restarts
}

View File

@@ -22,7 +22,6 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -33,73 +32,93 @@ import (
"sigs.k8s.io/descheduler/test"
)
func initPods(node *v1.Node) []*v1.Pod {
pods := make([]*v1.Pod, 0)
const (
nodeName1 = "node1"
nodeName2 = "node2"
nodeName3 = "node3"
nodeName4 = "node4"
nodeName5 = "node5"
)
for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
func buildTestNode(nodeName string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(nodeName, 2000, 3000, 10, apply)
}
// pod at index i will have 25 * i restarts.
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * i,
},
func setPodContainerStatusRestartCount(pod *v1.Pod, base int32) {
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * base,
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * i,
},
{
RestartCount: 10 * i,
},
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * base,
},
}
pods = append(pods, pod)
}
// The following 3 pods won't get evicted.
// A daemonset.
pods[6].ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
pods[7].ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pods[7].Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
{
RestartCount: 10 * base,
},
},
}
// A Mirror Pod.
pods[8].Annotations = test.GetMirrorPodAnnotation()
}
func initPodContainersWithStatusRestartCount(name string, base int32, apply func(pod *v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
// a pod built with base i has 25 * i total restarts: 5*i in the init container and 10*i in each of the two regular containers
setPodContainerStatusRestartCount(pod, base)
if apply != nil {
apply(pod)
}
})
}
func initPods(apply func(pod *v1.Pod)) []*v1.Pod {
pods := make([]*v1.Pod, 0)
for i := int32(0); i <= 9; i++ {
switch i {
default:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, apply))
// The following 3 pods won't get evicted.
// A daemonset.
case 6:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
if apply != nil {
apply(pod)
}
}))
// A pod with local storage.
case 7:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
if apply != nil {
apply(pod)
}
}))
// A Mirror Pod.
case 8:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
if apply != nil {
apply(pod)
}
}))
}
}
pods = append(
pods,
test.BuildTestPod("CPU-consumer-1", 150, 100, nodeName4, test.SetNormalOwnerRef),
test.BuildTestPod("CPU-consumer-2", 150, 100, nodeName5, test.SetNormalOwnerRef),
)
return pods
}
func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
})
node3 := test.BuildTestNode("node3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)
createRemovePodsHavingTooManyRestartsAgrs := func(
podRestartThresholds int32,
includingInitContainers bool,
@@ -114,207 +133,261 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
tests := []struct {
description string
pods []*v1.Pod
nodes []*v1.Node
args RemovePodsHavingTooManyRestartsArgs
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
nodeFit bool
applyFunc func([]*v1.Pod)
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 5,
},
{
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node2},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node3},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node4},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName4, 200, 3000, 10, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node5},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
nodes: []*v1.Node{node1, node5},
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
pods: initPods(func(pod *v1.Pod) {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
}
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
applyFunc: func(pods []*v1.Pod) {
for _, pod := range pods {
if len(pod.Status.ContainerStatuses) > 0 {
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
},
{
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "pods running with state=Running, 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
pods: initPods(func(pod *v1.Pod) {
pod.Status.Phase = v1.PodRunning
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
pods: initPods(func(pod *v1.Pod) {
pod.Status.Phase = v1.PodPending
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
pods: initPods(func(pod *v1.Pod) {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "pods pending with state=Running, 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
nodes: []*v1.Node{node1},
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
pods: initPods(func(pod *v1.Pod) {
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
{
State: v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
},
},
}
}),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -322,7 +395,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
@@ -341,6 +414,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}
plugin, err := New(
ctx,
&tc.args,
handle)
if err != nil {

View File

@@ -15,29 +15,32 @@ package removepodshavingtoomanyrestarts
import (
"fmt"
"sort"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
)
// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
args := obj.(*RemovePodsHavingTooManyRestartsArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}
if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}
if args.PodRestartThreshold < 1 {
return fmt.Errorf("invalid PodsHavingTooManyRestarts threshold")
allErrs = append(allErrs, fmt.Errorf("invalid PodsHavingTooManyRestarts threshold"))
}
allowedStates := sets.New(
@@ -49,8 +52,10 @@ func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
)
if !allowedStates.HasAll(args.States...) {
return fmt.Errorf("states must be one of %v", allowedStates.UnsortedList())
allowed := allowedStates.UnsortedList()
sort.Strings(allowed)
allErrs = append(allErrs, fmt.Errorf("states must be one of %v", allowed))
}
return utilerrors.NewAggregate(allErrs)
}
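The validator now collects every problem into allErrs and returns them through utilerrors.NewAggregate instead of bailing out on the first failure. Below is a minimal, self-contained sketch of that aggregation behaviour; the example program and its messages are illustrative only and are not part of the diff.

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// Collect every validation problem instead of returning early.
	var allErrs []error
	allErrs = append(allErrs, fmt.Errorf("invalid PodsHavingTooManyRestarts threshold"))
	allErrs = append(allErrs, fmt.Errorf("states must be one of [CrashLoopBackOff Running]"))

	// NewAggregate returns nil for an empty (or all-nil) slice, so a valid
	// config still yields a nil error; otherwise the messages are joined as
	// "[msg1, msg2]", which is what the updated unit test asserts against.
	fmt.Println(utilerrors.NewAggregate(allErrs))
}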

View File

@@ -17,6 +17,7 @@ limitations under the License.
package removepodshavingtoomanyrestarts
import (
"fmt"
"testing"
v1 "k8s.io/api/core/v1"
@@ -26,7 +27,7 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
testCases := []struct {
description string
args *RemovePodsHavingTooManyRestartsArgs
errInfo error
}{
{
description: "valid arg, no errors",
@@ -34,14 +35,13 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
PodRestartThreshold: 1,
States: []string{string(v1.PodRunning)},
},
},
{
description: "invalid PodRestartThreshold arg, expects errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 0,
},
errInfo: fmt.Errorf(`invalid PodsHavingTooManyRestarts threshold`),
},
{
description: "invalid States arg, expects errors",
@@ -49,7 +49,7 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
PodRestartThreshold: 1,
States: []string{string(v1.PodFailed)},
},
errInfo: fmt.Errorf(`states must be one of [CrashLoopBackOff Running]`),
},
{
description: "allows CrashLoopBackOff state",
@@ -57,17 +57,26 @@ func TestValidateRemovePodsHavingTooManyRestartsArgs(t *testing.T) {
PodRestartThreshold: 1,
States: []string{"CrashLoopBackOff"},
},
},
{
description: "invalid PodRestartThreshold arg and invalid States arg, expects errors",
args: &RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 0,
States: []string{string(v1.PodFailed)},
},
errInfo: fmt.Errorf(`[invalid PodsHavingTooManyRestarts threshold, states must be one of [CrashLoopBackOff Running]]`),
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemovePodsHavingTooManyRestartsArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -35,6 +35,7 @@ const PluginName = "RemovePodsViolatingInterPodAntiAffinity"
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node which violate inter pod anti affinity
type RemovePodsViolatingInterPodAntiAffinity struct {
logger klog.Logger
handle frameworktypes.Handle
args *RemovePodsViolatingInterPodAntiAffinityArgs
podFilter podutil.FilterFunc
@@ -43,11 +44,12 @@ type RemovePodsViolatingInterPodAntiAffinity struct {
var _ frameworktypes.DeschedulePlugin = &RemovePodsViolatingInterPodAntiAffinity{}
// New builds plugin from its arguments while passing a handle
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
interPodAntiAffinityArgs, ok := args.(*RemovePodsViolatingInterPodAntiAffinityArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type RemovePodsViolatingInterPodAntiAffinityArgs, got %T", args)
}
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
var includedNamespaces, excludedNamespaces sets.Set[string]
if interPodAntiAffinityArgs.Namespaces != nil {
@@ -65,6 +67,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
}
return &RemovePodsViolatingInterPodAntiAffinity{
logger: logger,
handle: handle,
podFilter: podFilter,
args: interPodAntiAffinityArgs,
@@ -77,6 +80,7 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Name() string {
}
func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
pods, err := podutil.ListPodsOnNodes(nodes, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
if err != nil {
return &frameworktypes.Status{
@@ -90,7 +94,7 @@ func (d *RemovePodsViolatingInterPodAntiAffinity) Deschedule(ctx context.Context
loop:
for _, node := range nodes {
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
logger.V(2).Info("Processing node", "node", klog.KObj(node))
pods := podsOnANode[node.Name]
// sort the evict-able Pods based on priority, if there are multiple pods with same priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(pods)
@@ -115,7 +119,7 @@ loop:
case *evictions.EvictionTotalLimitError:
return nil
default:
klog.Errorf("eviction failed: %v", err)
logger.Error(err, "eviction failed")
}
}
}
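The constructor change above threads a context into New so the plugin can capture a klog logger scoped with its plugin name, and Deschedule then re-derives a logger scoped with the extension point instead of calling the global klog helpers. A rough, self-contained sketch of that wiring follows; demoPlugin, newDemoPlugin, and the literal values are made up for illustration.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// demoPlugin keeps a plugin-scoped logger captured at construction time,
// mirroring the pattern used by the plugin above.
type demoPlugin struct {
	logger klog.Logger
}

func newDemoPlugin(ctx context.Context) *demoPlugin {
	return &demoPlugin{
		logger: klog.FromContext(ctx).WithValues("plugin", "Demo"),
	}
}

func (d *demoPlugin) Deschedule(ctx context.Context) {
	// Derive a logger that carries both the plugin name and the extension point.
	logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", "Deschedule")
	logger.V(2).Info("Processing node", "node", "n1")
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.Background())
	newDemoPlugin(ctx).Deschedule(ctx)
}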

View File

@@ -33,84 +33,75 @@ import (
"sigs.k8s.io/descheduler/test"
)
const (
nodeName1 = "n1"
nodeName2 = "n2"
nodeName3 = "n3"
nodeName4 = "n4"
nodeName5 = "n5"
)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p11 := test.BuildTestPod("p11", 100, 0, node5.Name, nil)
p9.DeletionTimestamp = &metav1.Time{}
p10.DeletionTimestamp = &metav1.Time{}
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(name, 2000, 3000, 10, apply)
}
func setNodeMainRegionLabel(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"region": "main-region",
}
}
func buildTestNode1() *v1.Node {
return buildTestNode(nodeName1, setNodeMainRegionLabel)
}
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName, apply)
}
func buildTestPodForNode1(name string, apply func(*v1.Pod)) *v1.Pod {
return buildTestPod(name, nodeName1, apply)
}
func setPodAntiAffinityFooBar(pod *v1.Pod) {
test.SetPodAntiAffinity(pod, "foo", "bar")
}
func setPodAntiAffinityFoo1Bar1(pod *v1.Pod) {
test.SetPodAntiAffinity(pod, "foo1", "bar1")
}
func setLabelsFooBar(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar"}
}
func setLabelsFoo1Bar1(pod *v1.Pod) {
pod.Labels = map[string]string{"foo1": "bar1"}
}
func buildTestPodWithAntiAffinityForNode1(name string) *v1.Pod {
return buildTestPodForNode1(name, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
})
}
func buildTestPodP2ForNode1() *v1.Pod {
return buildTestPodForNode1("p2", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
})
}
func buildTestPodNonEvictableForNode1() *v1.Pod {
criticalPriority := utils.SystemCriticalPriority
return buildTestPodForNode1("non-evict", func(pod *v1.Pod) {
test.SetPodPriority(pod, criticalPriority)
setLabelsFooBar(pod)
})
}
func TestPodAntiAffinity(t *testing.T) {
var uint1 uint = 1
var uint3 uint = 3
@@ -125,87 +116,204 @@ func TestPodAntiAffinity(t *testing.T) {
nodes []*v1.Node
}{
{
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxNoOfPodsToEvictTotal)",
maxNoOfPodsToEvictPerNamespace: &uint3,
maxNoOfPodsToEvictTotal: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
description: "Evict only 1 pod after sorting",
pods: []*v1.Pod{
buildTestPodForNode1("p5", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
setPodAntiAffinityFoo1Bar1(pod)
test.SetPodPriority(pod, 100)
}),
buildTestPodForNode1("p6", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
setPodAntiAffinityFoo1Bar1(pod)
test.SetPodPriority(pod, 50)
}),
buildTestPodForNode1("p7", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFoo1Bar1(pod)
setPodAntiAffinityFooBar(pod)
test.SetPodPriority(pod, 0)
}),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
nodes: []*v1.Node{node1},
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node2},
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodForNode1("p8", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
buildTestNode(nodeName2, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
}),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
nodes: []*v1.Node{node1, node3},
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{
buildTestPodForNode1("p8", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodNonEvictableForNode1(),
},
nodes: []*v1.Node{
buildTestNode1(),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{
buildTestPodForNode1("p9", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
pod.DeletionTimestamp = &metav1.Time{}
}),
buildTestPodForNode1("p10", func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setPodAntiAffinityFooBar(pod)
pod.DeletionTimestamp = &metav1.Time{}
}),
},
nodes: []*v1.Node{
buildTestNode1(),
},
expectedEvictedPodCount: 0,
},
{
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1, node4},
description: "Won't evict pods because only other node doesn't have enough resources",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPodP2ForNode1(),
buildTestPodWithAntiAffinityForNode1("p3"),
buildTestPodWithAntiAffinityForNode1("p4"),
},
nodes: []*v1.Node{
buildTestNode1(),
test.BuildTestNode(nodeName4, 2, 2, 1, nil),
},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
pods: []*v1.Pod{p1, p11},
nodes: []*v1.Node{node1, node5},
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
pods: []*v1.Pod{
buildTestPodWithAntiAffinityForNode1("p1"),
buildTestPod("p11", nodeName5, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
setLabelsFooBar(pod)
}),
},
nodes: []*v1.Node{
buildTestNode1(),
test.BuildTestNode(nodeName5, 200, 3000, 10, setNodeMainRegionLabel),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},
@@ -240,6 +348,7 @@ func TestPodAntiAffinity(t *testing.T) {
}
plugin, err := New(
ctx,
&RemovePodsViolatingInterPodAntiAffinityArgs{},
handle,
)
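The rewritten test builds nodes and pods through small helpers that take an optional apply callback (buildTestNode, buildTestPodForNode1, and so on), so each case customises only the fields it cares about. Here is a generic sketch of that builder-with-apply pattern; Widget and buildWidget are hypothetical names and are not part of the test code above.

package main

import "fmt"

// Widget stands in for a node or pod object under test.
type Widget struct {
	Name   string
	Labels map[string]string
}

// buildWidget applies shared defaults, then lets the caller tweak the result.
func buildWidget(name string, apply func(*Widget)) *Widget {
	w := &Widget{
		Name:   name,
		Labels: map[string]string{},
	}
	if apply != nil {
		apply(w)
	}
	return w
}

func main() {
	plain := buildWidget("w1", nil)
	labeled := buildWidget("w2", func(w *Widget) {
		w.Labels["region"] = "main-region"
	})
	fmt.Println(plain.Name, labeled.Labels["region"])
}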

View File

@@ -18,21 +18,23 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
)
// ValidateRemovePodsViolatingInterPodAntiAffinityArgs validates ValidateRemovePodsViolatingInterPodAntiAffinity arguments
func ValidateRemovePodsViolatingInterPodAntiAffinityArgs(obj runtime.Object) error {
args := obj.(*RemovePodsViolatingInterPodAntiAffinityArgs)
var allErrs []error
// At most one of include/exclude can be set
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
}
if args.LabelSelector != nil {
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
}
}
return utilerrors.NewAggregate(allErrs)
}

View File

@@ -1,6 +1,7 @@
package removepodsviolatinginterpodantiaffinity
import (
"fmt"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -11,7 +12,7 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
testCases := []struct {
description string
args *RemovePodsViolatingInterPodAntiAffinityArgs
errInfo error
}{
{
description: "valid namespace args, no errors",
@@ -20,7 +21,6 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
Include: []string{"default"},
},
},
},
{
description: "invalid namespaces args, expects error",
@@ -30,7 +30,7 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
Exclude: []string{"kube-system"},
},
},
errInfo: fmt.Errorf(`only one of Include/Exclude namespaces can be set`),
},
{
description: "valid label selector args, no errors",
@@ -39,7 +39,6 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
},
},
},
{
description: "invalid label selector args, expects errors",
@@ -52,16 +51,19 @@ func TestValidateRemovePodsViolatingInterPodAntiAffinityArgs(t *testing.T) {
},
},
},
errInfo: fmt.Errorf(`failed to get label selectors from strategy's params: [key: Invalid value: "": name part must be non-empty; name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), values: Invalid value: null: for 'in', 'notin' operators, values set can't be empty]`),
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
validateErr := ValidateRemovePodsViolatingInterPodAntiAffinityArgs(testCase.args)
if validateErr == nil || testCase.errInfo == nil {
if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
} else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
}
})
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Some files were not shown because too many files have changed in this diff.