Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Compare commits: 296 commits, v0.34.0...85b1d97dda
CONTRIBUTING-descheduler.md (new file, 30 lines)
@@ -0,0 +1,30 @@
# Descheduler Design Constraints

This is a slowly growing document that lists good practices, conventions, and design decisions.

## Overview

TBD

## Code convention

* *formatting code*: run `make fmt` before committing each change to avoid CI failures

## Unit Test Conventions

These are the known conventions that are useful to practice whenever reasonable:

* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
* *no object instance duplication*: avoid duplication by not creating two objects with the same passed values in two different places, e.g. two nodes created with the same memory, CPU, and pod requests. Instead, create a single function wrapping `test.BuildTestNode` and invoke this wrapper multiple times (see the sketch below).

The aim is to reduce cognitive load when reading and debugging the test code.
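A minimal sketch of these conventions, assuming the existing `test.BuildTestNode`/`test.BuildTestPod` helpers; the package name, the wrapper name `buildWorkerNode`, and the taint key are illustrative only:

```go
package conventions // illustrative package name

import (
	"testing"

	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/test"
)

// buildWorkerNode wraps test.BuildTestNode so every test case gets a freshly
// created node with identical capacity, instead of sharing instances or
// duplicating literal values across tests.
func buildWorkerNode(name string) *v1.Node {
	return test.BuildTestNode(name, 2000, 3000, 10, func(node *v1.Node) {
		node.Spec.Taints = []v1.Taint{{Key: "dedicated", Effect: v1.TaintEffectNoSchedule}}
	})
}

func TestConventionsSketch(t *testing.T) {
	node1 := buildWorkerNode("n1")
	node2 := buildWorkerNode("n2")
	// All pod mutations go through the apply argument; p1 is not modified afterwards.
	p1 := test.BuildTestPod("p1", 100, 0, node1.Name, func(pod *v1.Pod) {
		pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
	})
	_, _ = node2, p1
}
```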
## Design Decisions FAQ

This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.

### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?

In general, each plugin can have many indexers, for example for nodes, namespaces, pods, and other resources. Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.
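A minimal sketch of the intended pattern, assuming the `PluginInstanceID()` handle method added in this changeset and the `nodeutil.AddNodeSelectorIndexer` helper; the package name, helper name, and indexer prefix are illustrative, and the handle is assumed to expose the shared informer factory as the default plugins do:

```go
package exampleplugin // illustrative package name

import (
	"k8s.io/apimachinery/pkg/labels"

	nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
	frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

// registerProfileScopedIndexer derives a collision-free indexer name from the
// profile instance ID exposed by the framework handle and registers a
// node-selector indexer under that name.
func registerProfileScopedIndexer(handle frameworktypes.Handle, nodeSelector labels.Selector) (string, error) {
	indexerName := "node_selector_" + handle.PluginInstanceID()
	err := nodeutil.AddNodeSelectorIndexer(
		handle.SharedInformerFactory().Core().V1().Nodes().Informer(),
		indexerName,
		nodeSelector,
	)
	return indexerName, err
}
```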
README.md (36 lines changed)
@@ -94,17 +94,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.34' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.34' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.34' | kubectl apply -f -
```

## User Guide
@@ -129,7 +129,7 @@ These are top level keys in the Descheduler Policy that you can use to configure
| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/). |
| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
| `gracePeriodSeconds` | `int` | `nil` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. |
| `prometheus` | `object` | `nil` | Configures collection of Prometheus metrics for actual resource utilization. |
| `prometheus.url` | `string` | `nil` | Points to a Prometheus server URL. |
| `prometheus.authToken` | `object` | `nil` | Sets the Prometheus server authentication token. If not specified, the in-cluster authentication token from the container's file system is read. |
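For orientation, a hedged sketch of how the top-level keys in the table above sit in a policy file; the values below are placeholders, not recommendations:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
# Top-level keys from the table above; values are illustrative only.
gracePeriodSeconds: 60
evictionFailureEventNotification: true
metricsProviders:
  - source: KubernetesMetrics
profiles: []   # a real policy also defines profiles with plugin configuration
```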
@@ -189,6 +189,31 @@ The Default Evictor Plugin is used by default for filtering pods before processi
| `"PodsWithoutPDB"` | Prevents eviction of Pods without a PodDisruptionBudget (PDB). |
| `"PodsWithResourceClaims"` | Prevents eviction of Pods using ResourceClaims. |

#### Protecting pods using specific Storage Classes

With the `PodsWithPVC` protection enabled, all pods using PVCs are protected from eviction by default. If needed, you can restrict the protection by filtering on the PVC storage class: only pods using PVCs with the specified storage classes are then protected from eviction. For example:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          podProtections:
            extraEnabled:
              - PodsWithPVC
            config:
              PodsWithPVC:
                protectedStorageClasses:
                  - name: storage-class-0
                  - name: storage-class-1
```

This example protects pods using PVCs with the storage classes `storage-class-0` and `storage-class-1` from eviction.

### Example policy

As part of the policy, you start by deciding which top-level configuration to use, then which Evictor plugin to use (your own if you have one, the Default Evictor otherwise), followed by the configuration passed to the Evictor plugin. By default, the Default Evictor is enabled for both the `filter` and `preEvictionFilter` extension points. After that, you enable/disable eviction strategy plugins and configure them properly.
@@ -229,6 +254,7 @@ profiles:
            #- "PodsWithPVC"
            #- "PodsWithoutPDB"
            #- "PodsWithResourceClaims"
          config: {}
        nodeFit: true
        minReplicas: 2
    plugins:
@@ -1152,7 +1178,7 @@ that the only people who can get things done around here are the "maintainers".
We also would love to add more "official" maintainers, so show us what you can
do!

This repository uses the Kubernetes bots. See a full list of the commands [here][prow].
This repository uses the Kubernetes bots. See a full list of the commands [here](https://go.k8s.io/bot-commands).

### Communicating With Contributors
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.33.0
appVersion: 0.33.0
version: 0.34.0
appVersion: 0.34.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
  - kubernetes
@@ -35,6 +35,9 @@ rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["nodes", "pods"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "watch", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
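After applying the updated RBAC manifests, the new permission can be checked with `kubectl auth can-i`; the `descheduler-sa` service account in `kube-system` is an assumption based on the default kustomize manifests:

```
kubectl auth can-i list persistentvolumeclaims --as=system:serviceaccount:kube-system:descheduler-sa
```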
pkg/api/v1alpha2/zz_generated.conversion.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

pkg/api/v1alpha2/zz_generated.deepcopy.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

pkg/api/v1alpha2/zz_generated.defaults.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

pkg/api/zz_generated.deepcopy.go (generated, 2 lines changed)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -74,6 +74,7 @@ import (
const (
    prometheusAuthTokenSecretKey = "prometheusAuthToken"
    workQueueKey                 = "key"
    indexerNodeSelectorGlobal    = "indexer_node_selector_global"
)

type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
@@ -164,7 +165,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
        v1.SchemeGroupVersion.WithResource("namespaces"),                 // Used by the defaultevictor plugin
        schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"),  // Used by the defaultevictor plugin
        policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin

        v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), // Used by the defaultevictor plugin
    ) // Used by the defaultevictor plugin

    getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
@@ -206,15 +207,20 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
        metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
    }

    if rs.MetricsClient != nil {
        nodeSelector := labels.Everything()
        if deschedulerPolicy.NodeSelector != nil {
            sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
            if err != nil {
                return nil, err
            }
            nodeSelector = sel
    nodeSelector := labels.Everything()
    if deschedulerPolicy.NodeSelector != nil {
        sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
        if err != nil {
            return nil, err
        }
        nodeSelector = sel
    }

    if err := nodeutil.AddNodeSelectorIndexer(sharedInformerFactory.Core().V1().Nodes().Informer(), indexerNodeSelectorGlobal, nodeSelector); err != nil {
        return nil, err
    }

    if rs.MetricsClient != nil {
        desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
    }
@@ -345,7 +351,7 @@ func (d *descheduler) eventHandler() cache.ResourceEventHandler {
    }
}

func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
func (d *descheduler) runDeschedulerLoop(ctx context.Context) error {
    var span trace.Span
    ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
    defer span.End()
@@ -354,12 +360,6 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
        metrics.LoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
    }(time.Now())

    // if len is still <= 1 error out
    if len(nodes) <= 1 {
        klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
        return nil // gracefully skip this cycle instead of aborting
    }

    var client clientset.Interface
    // When the dry mode is enable, collect all the relevant objects (mostly pods) under a fake client.
    // So when evicting pods while running multiple strategies in a row have the cummulative effect
@@ -384,6 +384,22 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
        return fmt.Errorf("build get pods assigned to node function error: %v", err)
    }

    nodeSelector := labels.Everything()
    if d.deschedulerPolicy.NodeSelector != nil {
        sel, err := labels.Parse(*d.deschedulerPolicy.NodeSelector)
        if err != nil {
            return err
        }
        nodeSelector = sel
    }
    // TODO(ingvagabund): copy paste all relevant indexers from the real client to the fake one
    // TODO(ingvagabund): register one indexer per each profile. Respect the precedence of no profile-level node selector is specified.
    // Also, keep a cache of node label selectors to detect duplicates to avoid creating an extra informer.

    if err := nodeutil.AddNodeSelectorIndexer(fakeSharedInformerFactory.Core().V1().Nodes().Informer(), indexerNodeSelectorGlobal, nodeSelector); err != nil {
        return err
    }

    fakeCtx, cncl := context.WithCancel(context.TODO())
    defer cncl()
    fakeSharedInformerFactory.Start(fakeCtx.Done())
@@ -399,7 +415,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
    d.podEvictor.SetClient(client)
    d.podEvictor.ResetCounters()

    d.runProfiles(ctx, client, nodes)
    d.runProfiles(ctx, client)

    klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())
@@ -409,12 +425,33 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface) {
    var span trace.Span
    ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
    defer span.End()

    nodesAsInterface, err := d.sharedInformerFactory.Core().V1().Nodes().Informer().GetIndexer().ByIndex(indexerNodeSelectorGlobal, indexerNodeSelectorGlobal)
    if err != nil {
        span.AddEvent("Failed to list nodes with global node selector", trace.WithAttributes(attribute.String("err", err.Error())))
        klog.Error(err)
        return
    }

    nodes, err := nodeutil.ReadyNodesFromInterfaces(nodesAsInterface)
    if err != nil {
        span.AddEvent("Failed to convert node as interfaces into ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
        klog.Error(err)
        return
    }

    // if len is still <= 1 error out
    if len(nodes) <= 1 {
        klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
        return // gracefully skip this cycle instead of aborting
    }

    var profileRunners []profileRunner
    for _, profile := range d.deschedulerPolicy.Profiles {
    for idx, profile := range d.deschedulerPolicy.Profiles {
        currProfile, err := frameworkprofile.NewProfile(
            ctx,
            profile,
@@ -425,6 +462,9 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
            frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
            frameworkprofile.WithMetricsCollector(d.metricsCollector),
            frameworkprofile.WithPrometheusClient(d.prometheusClient),
            // Generate a unique instance ID using just the index to avoid long IDs
            // when profile names are very long
            frameworkprofile.WithProfileInstanceID(fmt.Sprintf("%d", idx)),
        )
        if err != nil {
            klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -587,11 +627,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
    sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))

    var nodeSelector string
    if deschedulerPolicy.NodeSelector != nil {
        nodeSelector = *deschedulerPolicy.NodeSelector
    }

    var eventClient clientset.Interface
    if rs.DryRun {
        eventClient = fakeclientset.NewSimpleClientset()
@@ -666,14 +701,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
        sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
        defer sSpan.End()

        nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
        if err != nil {
            sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
            klog.Error(err)
            cancel()
            return
        }
        err = descheduler.runDeschedulerLoop(sCtx, nodes)
        err = descheduler.runDeschedulerLoop(sCtx)
        if err != nil {
            sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
            klog.Error(err)
@@ -177,7 +177,7 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
    }
}

func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, dryRun bool, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
    client := fakeclientset.NewSimpleClientset(objects...)
    eventClient := fakeclientset.NewSimpleClientset(objects...)
@@ -189,6 +189,7 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
    rs.EventClient = eventClient
    rs.DefaultFeatureGates = featureGates
    rs.MetricsClient = metricsClient
    rs.DryRun = dryRun

    sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
    eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
@@ -477,70 +478,72 @@ func taintNodeNoSchedule(node *v1.Node) {
func TestPodEvictorReset(t *testing.T) {
    initPluginRegistry()

    ctx := context.Background()
    node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
    node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
    nodes := []*v1.Node{node1, node2}

    ownerRef1 := test.GetReplicaSetOwnerRefList()
    updatePod := func(pod *v1.Pod) {
        pod.Namespace = "dev"
        pod.ObjectMeta.OwnerReferences = ownerRef1
    tests := []struct {
        name   string
        dryRun bool
        cycles []struct {
            expectedTotalEvicted  uint
            expectedRealEvictions int
            expectedFakeEvictions int
        }
    }{
        {
            name:   "real mode",
            dryRun: false,
            cycles: []struct {
                expectedTotalEvicted  uint
                expectedRealEvictions int
                expectedFakeEvictions int
            }{
                {expectedTotalEvicted: 2, expectedRealEvictions: 2, expectedFakeEvictions: 0},
                {expectedTotalEvicted: 2, expectedRealEvictions: 4, expectedFakeEvictions: 0},
            },
        },
        {
            name:   "dry mode",
            dryRun: true,
            cycles: []struct {
                expectedTotalEvicted  uint
                expectedRealEvictions int
                expectedFakeEvictions int
            }{
                {expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 2},
                {expectedTotalEvicted: 2, expectedRealEvictions: 0, expectedFakeEvictions: 4},
            },
        },
    }

    p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
    p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            ctx := context.Background()
            node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
            node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)

            internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
            ctxCancel, cancel := context.WithCancel(ctx)
            rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
            defer cancel()
            p1 := test.BuildTestPod("p1", 100, 0, node1.Name, test.SetRSOwnerRef)
            p2 := test.BuildTestPod("p2", 100, 0, node1.Name, test.SetRSOwnerRef)

            var evictedPods []string
            client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
            internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
            ctxCancel, cancel := context.WithCancel(ctx)
            _, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, tc.dryRun, node1, node2, p1, p2)
            defer cancel()

            var fakeEvictedPods []string
            descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
                return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
            }
            var evictedPods []string
            client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

            // a single pod eviction expected
            klog.Infof("2 pod eviction expected per a descheduling cycle, 2 real evictions in total")
            if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
                t.Fatalf("Unable to run a descheduling loop: %v", err)
            }
            if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
                t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
            }
            var fakeEvictedPods []string
            descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
                return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
            }

            // a single pod eviction expected
            klog.Infof("2 pod eviction expected per a descheduling cycle, 4 real evictions in total")
            if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
                t.Fatalf("Unable to run a descheduling loop: %v", err)
            }
            if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
                t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
            }

            // check the fake client syncing and the right pods evicted
            klog.Infof("Enabling the dry run mode")
            rs.DryRun = true
            evictedPods = []string{}

            klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
            if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
                t.Fatalf("Unable to run a descheduling loop: %v", err)
            }
            if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
                t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
            }

            klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
            if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
                t.Fatalf("Unable to run a descheduling loop: %v", err)
            }
            if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
                t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
            for i, cycle := range tc.cycles {
                if err := descheduler.runDeschedulerLoop(ctx); err != nil {
                    t.Fatalf("Cycle %d: Unable to run a descheduling loop: %v", i+1, err)
                }
                if descheduler.podEvictor.TotalEvicted() != cycle.expectedTotalEvicted || len(evictedPods) != cycle.expectedRealEvictions || len(fakeEvictedPods) != cycle.expectedFakeEvictions {
                    t.Fatalf("Cycle %d: Expected (%v,%v,%v) pods evicted, got (%v,%v,%v) instead", i+1, cycle.expectedTotalEvicted, cycle.expectedRealEvictions, cycle.expectedFakeEvictions, descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
                }
            }
        })
    }
}
@@ -555,7 +558,7 @@ func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, to
    }
}

func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
    err := descheduler.runDeschedulerLoop(ctx, nodes)
    err := descheduler.runDeschedulerLoop(ctx)
    if err != nil {
        t.Fatalf("Unable to run a descheduling loop: %v", err)
    }
@@ -595,7 +598,7 @@ func TestEvictionRequestsCache(t *testing.T) {
    featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
    })
    _, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
    _, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, false, node1, node2, p1, p2, p3, p4)
    defer cancel()

    var fakeEvictedPods []string
@@ -731,13 +734,12 @@ func TestDeschedulingLimits(t *testing.T) {
    ctx := context.Background()
    node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
    node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
    nodes := []*v1.Node{node1, node2}
    ctxCancel, cancel := context.WithCancel(ctx)
    featureGates := featuregate.NewFeatureGate()
    featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
    })
    _, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
    _, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, false, node1, node2)
    defer cancel()

    var fakeEvictedPods []string
@@ -774,7 +776,7 @@ func TestDeschedulingLimits(t *testing.T) {
    time.Sleep(100 * time.Millisecond)

    klog.Infof("2 evictions in background expected, 2 normal evictions")
    err := descheduler.runDeschedulerLoop(ctx, nodes)
    err := descheduler.runDeschedulerLoop(ctx)
    if err != nil {
        t.Fatalf("Unable to run a descheduling loop: %v", err)
    }
@@ -790,6 +792,219 @@ func TestDeschedulingLimits(t *testing.T) {
    }
}

func TestNodeLabelSelectorBasedEviction(t *testing.T) {
    initPluginRegistry()

    // createNodes creates 4 nodes with different labels and applies a taint to all of them
    createNodes := func() (*v1.Node, *v1.Node, *v1.Node, *v1.Node) {
        taint := []v1.Taint{
            {
                Key:    "test-taint",
                Value:  "test-value",
                Effect: v1.TaintEffectNoSchedule,
            },
        }
        node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
            node.Labels = map[string]string{
                "zone":        "us-east-1a",
                "node-type":   "compute",
                "environment": "production",
            }
            node.Spec.Taints = taint
        })
        node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
            node.Labels = map[string]string{
                "zone":        "us-east-1b",
                "node-type":   "compute",
                "environment": "production",
            }
            node.Spec.Taints = taint
        })
        node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
            node.Labels = map[string]string{
                "zone":        "us-west-1a",
                "node-type":   "storage",
                "environment": "staging",
            }
            node.Spec.Taints = taint
        })
        node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
            node.Labels = map[string]string{
                "zone":        "us-west-1b",
                "node-type":   "storage",
                "environment": "staging",
            }
            node.Spec.Taints = taint
        })
        return node1, node2, node3, node4
    }

    tests := []struct {
        description              string
        nodeSelector             string
        dryRun                   bool
        expectedEvictedFromNodes []string
    }{
        {
            description:              "Evict from n1, n2",
            nodeSelector:             "environment=production",
            dryRun:                   false,
            expectedEvictedFromNodes: []string{"n1", "n2"},
        },
        {
            description:              "Evict from n1, n2 in dry run mode",
            nodeSelector:             "environment=production",
            dryRun:                   true,
            expectedEvictedFromNodes: []string{"n1", "n2"},
        },
        {
            description:              "Evict from n3, n4",
            nodeSelector:             "environment=staging",
            dryRun:                   false,
            expectedEvictedFromNodes: []string{"n3", "n4"},
        },
        {
            description:              "Evict from n3, n4 in dry run mode",
            nodeSelector:             "environment=staging",
            dryRun:                   true,
            expectedEvictedFromNodes: []string{"n3", "n4"},
        },
        {
            description:              "Evict from n1, n4",
            nodeSelector:             "zone in (us-east-1a, us-west-1b)",
            dryRun:                   false,
            expectedEvictedFromNodes: []string{"n1", "n4"},
        },
        {
            description:              "Evict from n1, n4 in dry run mode",
            nodeSelector:             "zone in (us-east-1a, us-west-1b)",
            dryRun:                   true,
            expectedEvictedFromNodes: []string{"n1", "n4"},
        },
        {
            description:              "Evict from n2, n3",
            nodeSelector:             "zone in (us-east-1b, us-west-1a)",
            dryRun:                   false,
            expectedEvictedFromNodes: []string{"n2", "n3"},
        },
        {
            description:              "Evict from n2, n3 in dry run mode",
            nodeSelector:             "zone in (us-east-1b, us-west-1a)",
            dryRun:                   true,
            expectedEvictedFromNodes: []string{"n2", "n3"},
        },
        {
            description:              "Evict from all nodes",
            nodeSelector:             "",
            dryRun:                   false,
            expectedEvictedFromNodes: []string{"n1", "n2", "n3", "n4"},
        },
        {
            description:              "Evict from all nodes in dry run mode",
            nodeSelector:             "",
            dryRun:                   true,
            expectedEvictedFromNodes: []string{"n1", "n2", "n3", "n4"},
        },
    }

    for _, tc := range tests {
        t.Run(tc.description, func(t *testing.T) {
            ctx := context.Background()

            // Create nodes with different labels and taints
            node1, node2, node3, node4 := createNodes()

            ownerRef := test.GetReplicaSetOwnerRefList()
            updatePod := func(pod *v1.Pod) {
                pod.ObjectMeta.OwnerReferences = ownerRef
                pod.Status.Phase = v1.PodRunning
            }

            // Create one pod per node
            p1 := test.BuildTestPod("p1", 200, 0, node1.Name, updatePod)
            p2 := test.BuildTestPod("p2", 200, 0, node2.Name, updatePod)
            p3 := test.BuildTestPod("p3", 200, 0, node3.Name, updatePod)
            p4 := test.BuildTestPod("p4", 200, 0, node4.Name, updatePod)

            // Map pod names to their node names for validation
            podToNode := map[string]string{
                "p1": "n1",
                "p2": "n2",
                "p3": "n3",
                "p4": "n4",
            }

            policy := removePodsViolatingNodeTaintsPolicy()
            if tc.nodeSelector != "" {
                policy.NodeSelector = &tc.nodeSelector
            }

            ctxCancel, cancel := context.WithCancel(ctx)
            _, deschedulerInstance, client := initDescheduler(t, ctxCancel, initFeatureGates(), policy, nil, tc.dryRun, node1, node2, node3, node4, p1, p2, p3, p4)
            defer cancel()

            // Verify all pods are created initially
            pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
            if err != nil {
                t.Fatalf("Unable to list pods: %v", err)
            }
            if len(pods.Items) != 4 {
                t.Errorf("Expected 4 pods initially, got %d", len(pods.Items))
            }

            var evictedPods []string
            if !tc.dryRun {
                client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
            } else {
                deschedulerInstance.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
                    return podEvictionReactionTestingFnc(&evictedPods, nil, nil)
                }
            }

            // Run descheduler
            if err := deschedulerInstance.runDeschedulerLoop(ctx); err != nil {
                t.Fatalf("Unable to run descheduler loop: %v", err)
            }

            // Collect which nodes had pods evicted from them
            nodesWithEvictedPods := make(map[string]bool)
            for _, podName := range evictedPods {
                if nodeName, ok := podToNode[podName]; ok {
                    nodesWithEvictedPods[nodeName] = true
                }
            }

            // Verify the correct number of nodes had pods evicted
            if len(nodesWithEvictedPods) != len(tc.expectedEvictedFromNodes) {
                t.Errorf("Expected pods to be evicted from %d nodes, got %d nodes: %v", len(tc.expectedEvictedFromNodes), len(nodesWithEvictedPods), nodesWithEvictedPods)
            }

            // Verify pods were evicted from the correct nodes
            for _, nodeName := range tc.expectedEvictedFromNodes {
                if !nodesWithEvictedPods[nodeName] {
                    t.Errorf("Expected pod to be evicted from node %s, but it was not", nodeName)
                }
            }

            // Verify no unexpected nodes had pods evicted
            for nodeName := range nodesWithEvictedPods {
                found := false
                for _, expectedNode := range tc.expectedEvictedFromNodes {
                    if nodeName == expectedNode {
                        found = true
                        break
                    }
                }
                if !found {
                    t.Errorf("Unexpected eviction from node %s", nodeName)
                }
            }

            t.Logf("Successfully evicted pods from nodes: %v", tc.expectedEvictedFromNodes)
        })
    }
}

func TestLoadAwareDescheduling(t *testing.T) {
    initPluginRegistry()
@@ -801,7 +1016,6 @@ func TestLoadAwareDescheduling(t *testing.T) {
    ctx := context.Background()
    node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
    node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
    nodes := []*v1.Node{node1, node2}

    p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
    p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
@@ -850,6 +1064,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
        initFeatureGates(),
        policy,
        metricsClientset,
        false,
        node1, node2, p1, p2, p3, p4, p5)
    defer cancel()
@@ -857,7 +1072,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
    // after newDescheduler in RunDeschedulerStrategies.
    descheduler.metricsCollector.Collect(ctx)

    err := descheduler.runDeschedulerLoop(ctx, nodes)
    err := descheduler.runDeschedulerLoop(ctx)
    if err != nil {
        t.Fatalf("Unable to run a descheduling loop: %v", err)
    }
@@ -28,6 +28,7 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    clientset "k8s.io/client-go/kubernetes"
    listersv1 "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog/v2"
    "sigs.k8s.io/descheduler/pkg/api"
@@ -78,6 +79,22 @@ func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister list
    return readyNodes, nil
}

// ReadyNodesFromInterfaces converts a list of interface{} items to ready nodes.
// Each interface{} item is expected to be a *v1.Node. Only ready nodes are returned.
func ReadyNodesFromInterfaces(nodeInterfaces []interface{}) ([]*v1.Node, error) {
    readyNodes := make([]*v1.Node, 0, len(nodeInterfaces))
    for i, nodeInterface := range nodeInterfaces {
        node, ok := nodeInterface.(*v1.Node)
        if !ok {
            return nil, fmt.Errorf("item at index %d is not a *v1.Node", i)
        }
        if IsReady(node) {
            readyNodes = append(readyNodes, node)
        }
    }
    return readyNodes, nil
}

// IsReady checks if the descheduler could run against given node.
func IsReady(node *v1.Node) bool {
    for i := range node.Status.Conditions {
@@ -241,7 +258,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
            return false, fmt.Errorf("insufficient %v", resource)
        }
    }
    // check pod num, at least one pod number is avaibalbe
    // check pod num, at least one pod number is available
    if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
        return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
    }
@@ -400,3 +417,22 @@ func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFun
    return false, nil
}

// AddNodeSelectorIndexer registers an indexer named indexerName on the node informer.
// Nodes whose labels match the given nodeSelector are indexed under that name.
func AddNodeSelectorIndexer(nodeInformer cache.SharedIndexInformer, indexerName string, nodeSelector labels.Selector) error {
    return nodeInformer.AddIndexers(cache.Indexers{
        indexerName: func(obj interface{}) ([]string, error) {
            node, ok := obj.(*v1.Node)
            if !ok {
                return []string{}, errors.New("unexpected object")
            }

            if nodeSelector.Matches(labels.Set(node.Labels)) {
                return []string{indexerName}, nil
            }

            return []string{}, nil
        },
    })
}
@@ -19,15 +19,20 @@ package node
import (
    "context"
    "errors"
    "sort"
    "strings"
    "sync"
    "testing"

    "github.com/google/go-cmp/cmp"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/utils/ptr"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/test"
)
@@ -78,13 +83,205 @@ func TestReadyNodesWithNodeSelector(t *testing.T) {
    sharedInformerFactory.WaitForCacheSync(stopChannel)
    defer close(stopChannel)

    // First verify nodeLister returns non-empty list
    allNodes, err := nodeLister.List(labels.Everything())
    if err != nil {
        t.Fatalf("Failed to list nodes from nodeLister: %v", err)
    }
    if len(allNodes) == 0 {
        t.Fatal("Expected nodeLister to return non-empty list of nodes")
    }
    if len(allNodes) != 2 {
        t.Errorf("Expected nodeLister to return 2 nodes, got %d", len(allNodes))
    }

    // Now test ReadyNodes
    nodes, _ := ReadyNodes(ctx, fakeClient, nodeLister, nodeSelector)

    if nodes[0].Name != "node1" {
    if len(nodes) != 1 {
        t.Errorf("Expected 1 node, got %d", len(nodes))
    } else if nodes[0].Name != "node1" {
        t.Errorf("Expected node1, got %s", nodes[0].Name)
    }
}

func TestReadyNodesFromInterfaces(t *testing.T) {
    node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
    node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
    node2.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}
    node3 := test.BuildTestNode("node3", 1000, 2000, 9, nil)

    tests := []struct {
        description    string
        nodeInterfaces []interface{}
        expectedCount  int
        expectedNames  []string
        expectError    bool
        errorContains  string
    }{
        {
            description:    "All nodes are ready",
            nodeInterfaces: []interface{}{node1, node3},
            expectedCount:  2,
            expectedNames:  []string{"node1", "node3"},
            expectError:    false,
        },
        {
            description:    "One node is not ready",
            nodeInterfaces: []interface{}{node1, node2, node3},
            expectedCount:  2,
            expectedNames:  []string{"node1", "node3"},
            expectError:    false,
        },
        {
            description:    "Empty list",
            nodeInterfaces: []interface{}{},
            expectedCount:  0,
            expectedNames:  []string{},
            expectError:    false,
        },
        {
            description:    "Invalid type in list",
            nodeInterfaces: []interface{}{node1, "not a node", node3},
            expectedCount:  0,
            expectError:    true,
            errorContains:  "item at index 1 is not a *v1.Node",
        },
    }

    for _, tc := range tests {
        t.Run(tc.description, func(t *testing.T) {
            nodes, err := ReadyNodesFromInterfaces(tc.nodeInterfaces)

            if tc.expectError {
                if err == nil {
                    t.Errorf("Expected error but got none")
                } else if tc.errorContains != "" && !strings.Contains(err.Error(), tc.errorContains) {
                    t.Errorf("Expected error to contain '%s', got '%s'", tc.errorContains, err.Error())
                }
                return
            }

            if err != nil {
                t.Errorf("Unexpected error: %v", err)
            }

            if len(nodes) != tc.expectedCount {
                t.Errorf("Expected %d nodes, got %d", tc.expectedCount, len(nodes))
            }

            for i, expectedName := range tc.expectedNames {
                if i >= len(nodes) {
                    t.Errorf("Missing node at index %d, expected %s", i, expectedName)
                    continue
                }
                if nodes[i].Name != expectedName {
                    t.Errorf("Expected node at index %d to be %s, got %s", i, expectedName, nodes[i].Name)
                }
            }
        })
    }
}

func TestAddNodeSelectorIndexer(t *testing.T) {
    node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
    node1.Labels = map[string]string{"type": "compute", "zone": "us-east-1"}
    node2 := test.BuildTestNode("node2", 1000, 2000, 9, nil)
    node2.Labels = map[string]string{"type": "infra", "zone": "us-west-1"}
    node3 := test.BuildTestNode("node3", 1000, 2000, 9, nil)
    node3.Labels = map[string]string{"type": "compute", "zone": "us-west-1"}

    tests := []struct {
        description     string
        indexerName     string
        selectorString  string
        expectedMatches []string
    }{
        {
            description:     "Index nodes by type=compute",
            indexerName:     "computeNodes",
            selectorString:  "type=compute",
            expectedMatches: []string{"node1", "node3"},
        },
        {
            description:     "Index nodes by type=infra",
            indexerName:     "infraNodes",
            selectorString:  "type=infra",
            expectedMatches: []string{"node2"},
        },
        {
            description:     "Index nodes by zone=us-west-1",
            indexerName:     "westZoneNodes",
            selectorString:  "zone=us-west-1",
            expectedMatches: []string{"node2", "node3"},
        },
        {
            description:     "Index nodes with multiple labels",
            indexerName:     "computeEastNodes",
            selectorString:  "type=compute,zone=us-east-1",
            expectedMatches: []string{"node1"},
        },
        {
            description:     "No matching nodes",
            indexerName:     "noMatchNodes",
            selectorString:  "type=storage",
            expectedMatches: []string{},
        },
    }

    for _, tc := range tests {
        t.Run(tc.description, func(t *testing.T) {
            fakeClient := fake.NewSimpleClientset(node1, node2, node3)
            sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
            nodeInformer := sharedInformerFactory.Core().V1().Nodes().Informer()

            selector, err := labels.Parse(tc.selectorString)
            if err != nil {
                t.Fatalf("Failed to parse selector: %v", err)
            }

            err = AddNodeSelectorIndexer(nodeInformer, tc.indexerName, selector)
            if err != nil {
                t.Fatalf("AddNodeSelectorIndexer failed: %v", err)
            }

            stopChannel := make(chan struct{})
            sharedInformerFactory.Start(stopChannel)
            sharedInformerFactory.WaitForCacheSync(stopChannel)
            defer close(stopChannel)

            indexer := nodeInformer.GetIndexer()
            objs, err := indexer.ByIndex(tc.indexerName, tc.indexerName)
            if err != nil {
                t.Errorf("Failed to query indexer: %v", err)
                return
            }

            // Extract node names from the results
            actualMatches := make([]string, 0, len(objs))
            for _, obj := range objs {
                node, ok := obj.(*v1.Node)
                if !ok {
                    t.Errorf("Expected *v1.Node, got %T", obj)
                    continue
                }
                actualMatches = append(actualMatches, node.Name)
            }

            // Sort both slices for consistent comparison
            sort.Strings(actualMatches)
            expectedMatches := make([]string, len(tc.expectedMatches))
            copy(expectedMatches, tc.expectedMatches)
            sort.Strings(expectedMatches)

            // Compare using cmp.Diff
            if diff := cmp.Diff(expectedMatches, actualMatches); diff != "" {
                t.Errorf("Node matches mismatch (-want +got):\n%s", diff)
            }
        })
    }
}

func TestIsNodeUnschedulable(t *testing.T) {
    tests := []struct {
        description string
@@ -1020,6 +1217,64 @@ func TestNodeFit(t *testing.T) {
            node:       node,
            podsOnNode: []*v1.Pod{},
        },
        {
            description: "Pod with native sidecars with too much cpu does not fit on node",
            pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
                pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
                    RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
                    Resources: v1.ResourceRequirements{
                        Requests: createResourceList(100000, 100*1000*1000, 0),
                    },
                })
            }),
            node:       node,
            podsOnNode: []*v1.Pod{},
            err:        errors.New("insufficient cpu"),
        },
        {
            description: "Pod with native sidecars with too much memory does not fit on node",
            pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
                pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
                    RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
                    Resources: v1.ResourceRequirements{
                        Requests: createResourceList(100, 1000*1000*1000*1000, 0),
                    },
                })
            }),
            node:       node,
            podsOnNode: []*v1.Pod{},
            err:        errors.New("insufficient memory"),
        },
        {
            description: "Pod with small native sidecars fits on node",
            pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
                pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
                    RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways), // native sidecar
                    Resources: v1.ResourceRequirements{
                        Requests: createResourceList(100, 100*1000*1000, 0),
                    },
                })
            }),
            node:       node,
            podsOnNode: []*v1.Pod{},
        },
        {
            description: "Pod with large overhead does not fit on node",
            pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
                pod.Spec.Overhead = createResourceList(100000, 100*1000*1000, 0)
            }),
            node:       node,
            podsOnNode: []*v1.Pod{},
            err:        errors.New("insufficient cpu"),
        },
        {
            description: "Pod with small overhead fits on node",
            pod: test.BuildTestPod("p1", 1, 100, "", func(pod *v1.Pod) {
                pod.Spec.Overhead = createResourceList(1, 1*1000*1000, 0)
            }),
            node:       node,
            podsOnNode: []*v1.Pod{},
        },
    }

    for _, tc := range tests {
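The cases above exercise the "native sidecar" pattern: an init container declared with `restartPolicy: Always` keeps running alongside the regular containers, so its requests count toward the pod's effective footprint when checking node fit. A minimal sketch of such a pod follows; the names and images are placeholders, not taken from the repository.

```yaml
# Hypothetical pod with a native sidecar (Kubernetes 1.28+ sidecar containers).
apiVersion: v1
kind: Pod
metadata:
  name: sidecar-demo
spec:
  initContainers:
    - name: log-shipper            # placeholder name
      image: example.com/shipper   # placeholder image
      restartPolicy: Always        # marks this init container as a native sidecar
      resources:
        requests:
          cpu: 100m
          memory: 100Mi
  containers:
    - name: app
      image: example.com/app       # placeholder image
      resources:
        requests:
          cpu: "1"
          memory: 100Mi
```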
@@ -23,6 +23,7 @@ type HandleImpl struct {
    PodEvictorImpl       *evictions.PodEvictor
    MetricsCollectorImpl *metricscollector.MetricsCollector
    PrometheusClientImpl promapi.Client
    PluginInstanceIDImpl string
}

var _ frameworktypes.Handle = &HandleImpl{}

@@ -62,3 +63,7 @@ func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) error {
    return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
}

func (hi *HandleImpl) PluginInstanceID() string {
    return hi.PluginInstanceIDImpl
}
@@ -73,6 +73,22 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
    }
}

func NewPluginFncFromFakeWithReactor(fp *FakePlugin, callback func(ActionImpl)) pluginregistry.PluginBuilder {
    return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
        fakePluginArgs, ok := args.(*FakePluginArgs)
        if !ok {
            return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
        }

        fp.handle = handle
        fp.args = fakePluginArgs

        callback(ActionImpl{handle: fp.handle})

        return fp, nil
    }
}

// New builds plugin from its arguments while passing a handle
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
    fakePluginArgs, ok := args.(*FakePluginArgs)
@@ -408,3 +424,55 @@ func (d *FakeFilterPlugin) handleBoolAction(action Action) bool {
    }
    panic(fmt.Errorf("unhandled %q action", action.GetExtensionPoint()))
}

// RegisterFakePlugin registers a FakePlugin with the given registry
func RegisterFakePlugin(name string, plugin *FakePlugin, registry pluginregistry.Registry) {
    pluginregistry.Register(
        name,
        NewPluginFncFromFake(plugin),
        &FakePlugin{},
        &FakePluginArgs{},
        ValidateFakePluginArgs,
        SetDefaults_FakePluginArgs,
        registry,
    )
}

// RegisterFakeDeschedulePlugin registers a FakeDeschedulePlugin with the given registry
func RegisterFakeDeschedulePlugin(name string, plugin *FakeDeschedulePlugin, registry pluginregistry.Registry) {
    pluginregistry.Register(
        name,
        NewFakeDeschedulePluginFncFromFake(plugin),
        &FakeDeschedulePlugin{},
        &FakeDeschedulePluginArgs{},
        ValidateFakePluginArgs,
        SetDefaults_FakePluginArgs,
        registry,
    )
}

// RegisterFakeBalancePlugin registers a FakeBalancePlugin with the given registry
func RegisterFakeBalancePlugin(name string, plugin *FakeBalancePlugin, registry pluginregistry.Registry) {
    pluginregistry.Register(
        name,
        NewFakeBalancePluginFncFromFake(plugin),
        &FakeBalancePlugin{},
        &FakeBalancePluginArgs{},
        ValidateFakePluginArgs,
        SetDefaults_FakePluginArgs,
        registry,
    )
}

// RegisterFakeFilterPlugin registers a FakeFilterPlugin with the given registry
func RegisterFakeFilterPlugin(name string, plugin *FakeFilterPlugin, registry pluginregistry.Registry) {
    pluginregistry.Register(
        name,
        NewFakeFilterPluginFncFromFake(plugin),
        &FakeFilterPlugin{},
        &FakeFilterPluginArgs{},
        ValidateFakePluginArgs,
        SetDefaults_FakePluginArgs,
        registry,
    )
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,11 +17,13 @@ import (
    "context"
    "errors"
    "fmt"
    "maps"
    "slices"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/tools/cache"
    "k8s.io/klog/v2"
@@ -122,13 +124,67 @@ func applyEffectivePodProtections(d *DefaultEvictor, podProtections []PodProtect
    applyFailedBarePodsProtection(d, protectionMap)
    applyLocalStoragePodsProtection(d, protectionMap)
    applyDaemonSetPodsProtection(d, protectionMap)
    applyPvcPodsProtection(d, protectionMap)
    applyPVCPodsProtection(d, protectionMap)
    applyPodsWithoutPDBProtection(d, protectionMap, handle)
    applyPodsWithResourceClaimsProtection(d, protectionMap)

    return nil
}

// protectedPVCStorageClasses returns the list of storage classes that should
// be protected from eviction. If the list is empty or nil then all storage
// classes are protected (assuming PodsWithPVC protection is enabled).
func protectedPVCStorageClasses(d *DefaultEvictor) []ProtectedStorageClass {
    protcfg := d.args.PodProtections.Config
    if protcfg == nil {
        return nil
    }
    scconfig := protcfg.PodsWithPVC
    if scconfig == nil {
        return nil
    }
    return scconfig.ProtectedStorageClasses
}

// podStorageClasses returns a list of storage classes referred by a pod. We
// need this when assessing if a pod should be protected because it refers to a
// protected storage class.
func podStorageClasses(inf informers.SharedInformerFactory, pod *v1.Pod) ([]string, error) {
    lister := inf.Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(
        pod.Namespace,
    )

    referred := map[string]bool{}
    for _, vol := range pod.Spec.Volumes {
        if vol.PersistentVolumeClaim == nil {
            continue
        }

        claim, err := lister.Get(vol.PersistentVolumeClaim.ClaimName)
        if err != nil {
            return nil, fmt.Errorf(
                "failed to get persistent volume claim %q/%q: %w",
                pod.Namespace, vol.PersistentVolumeClaim.ClaimName, err,
            )
        }

        // this should never happen as once a pvc is created with a nil
        // storageClass it is automatically picked up by the default
        // storage class. By returning an error here we make the pod
        // protected from eviction.
        if claim.Spec.StorageClassName == nil || *claim.Spec.StorageClassName == "" {
            return nil, fmt.Errorf(
                "failed to resolve storage class for pod %q/%q",
                pod.Namespace, claim.Name,
            )
        }

        referred[*claim.Spec.StorageClassName] = true
    }

    return slices.Collect(maps.Keys(referred)), nil
}

func applyFailedBarePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
    isProtectionEnabled := protectionMap[FailedBarePods]
    if !isProtectionEnabled {
@@ -206,16 +262,50 @@ func applyDaemonSetPodsProtection(d *DefaultEvictor, protectionMap map[PodProtec
    }
}

func applyPvcPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
    isProtectionEnabled := protectionMap[PodsWithPVC]
    if isProtectionEnabled {
        d.constraints = append(d.constraints, func(pod *v1.Pod) error {
            if utils.IsPodWithPVC(pod) {
                return fmt.Errorf("pod with PVC is protected against eviction")
// applyPVCPodsProtection protects pods that refer to a PVC from eviction. If
// the user has specified a list of storage classes to protect then only pods
// referring to PVCs of those storage classes are protected.
func applyPVCPodsProtection(d *DefaultEvictor, enabledProtections map[PodProtection]bool) {
    if !enabledProtections[PodsWithPVC] {
        return
    }

    // if the user isn't filtering by storage classes we protect all pods
    // referring to a PVC.
    protected := protectedPVCStorageClasses(d)
    if len(protected) == 0 {
        d.constraints = append(
            d.constraints,
            func(pod *v1.Pod) error {
                if utils.IsPodWithPVC(pod) {
                    return fmt.Errorf("pod with PVC is protected against eviction")
                }
                return nil
            },
        )
        return
    }

    protectedsc := map[string]bool{}
    for _, class := range protected {
        protectedsc[class.Name] = true
    }

    d.constraints = append(
        d.constraints, func(pod *v1.Pod) error {
            classes, err := podStorageClasses(d.handle.SharedInformerFactory(), pod)
            if err != nil {
                return err
            }
            for _, class := range classes {
                if !protectedsc[class] {
                    continue
                }
                return fmt.Errorf("pod using protected storage class %q", class)
            }
            return nil
        })
    }
        },
    )
}

func applyPodsWithoutPDBProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) {
File diff suppressed because it is too large
@@ -75,6 +75,37 @@ type PodProtections struct {
    // DefaultDisabled specifies which default protection policies should be disabled.
    // Supports: PodsWithLocalStorage, DaemonSetPods, SystemCriticalPods, FailedBarePods
    DefaultDisabled []PodProtection `json:"defaultDisabled,omitempty"`

    // Config holds configuration for pod protection policies. Depending on
    // the enabled policies this may be required. For instance, when
    // enabling the PodsWithPVC policy the user may specify which storage
    // classes should be protected.
    Config *PodProtectionsConfig `json:"config,omitempty"`
}

// PodProtectionsConfig holds configuration for pod protection policies. The
// name of the fields here must be equal to a protection name. This struct is
// meant to be extended as more protection policies are added.
// +k8s:deepcopy-gen=true
type PodProtectionsConfig struct {
    PodsWithPVC *PodsWithPVCConfig `json:"PodsWithPVC,omitempty"`
}

// PodsWithPVCConfig holds configuration for the PodsWithPVC protection.
// +k8s:deepcopy-gen=true
type PodsWithPVCConfig struct {
    // ProtectedStorageClasses is a list of storage classes that we want to
    // protect. i.e. if a pod refers to one of these storage classes it is
    // protected from being evicted. If none is provided then all pods with
    // PVCs are protected from eviction.
    ProtectedStorageClasses []ProtectedStorageClass `json:"protectedStorageClasses,omitempty"`
}

// ProtectedStorageClass is used to determine what storage classes are
// protected when the PodsWithPVC protection is enabled. This object exists
// so we can later on extend it with more configuration if needed.
type ProtectedStorageClass struct {
    Name string `json:"name"`
}

// defaultPodProtections holds the list of protection policies that are enabled by default.
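As an illustration of how these fields could be wired together, below is a minimal sketch of a DefaultEvictor plugin configuration that enables the PodsWithPVC protection for selected storage classes only. The `podProtections` and `extraEnabled` key casing is inferred from the Go field names above (their serialized tags are not shown in this hunk), and the storage class names are placeholders.

```yaml
# Sketch only: protects pods whose PVCs use one of two storage classes;
# pods using other storage classes remain evictable.
pluginConfig:
  - name: DefaultEvictor
    args:
      podProtections:            # key casing inferred, not confirmed by this diff
        extraEnabled:
          - PodsWithPVC
        config:
          PodsWithPVC:           # key follows the json tag "PodsWithPVC" shown above
            protectedStorageClasses:
              - name: ceph-rbd   # placeholder storage class
              - name: longhorn   # placeholder storage class
```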
@@ -72,6 +72,17 @@ func ValidateDefaultEvictorArgs(obj runtime.Object) error {
        if hasDuplicates(args.PodProtections.ExtraEnabled) {
            allErrs = append(allErrs, fmt.Errorf("PodProtections.ExtraEnabled contains duplicate entries"))
        }

        if slices.Contains(args.PodProtections.ExtraEnabled, PodsWithPVC) {
            if args.PodProtections.Config != nil && args.PodProtections.Config.PodsWithPVC != nil {
                protectedsc := args.PodProtections.Config.PodsWithPVC.ProtectedStorageClasses
                for i, sc := range protectedsc {
                    if sc.Name == "" {
                        allErrs = append(allErrs, fmt.Errorf("PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[%d] name cannot be empty", i))
                    }
                }
            }
        }
    }

    return utilerrors.NewAggregate(allErrs)
@@ -198,6 +198,33 @@ func TestValidateDefaultEvictorArgs(t *testing.T) {
            },
            errInfo: fmt.Errorf(`[noEvictionPolicy accepts only ["Preferred" "Mandatory"] values, invalid pod protection policy in DefaultDisabled: "PodsWithoutPDB". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods], PodProtections.DefaultDisabled contains duplicate entries, PodProtections.ExtraEnabled contains duplicate entries]`),
        },
        {
            name: "Protected storage classes without storage class name",
            args: &DefaultEvictorArgs{
                PodProtections: PodProtections{
                    ExtraEnabled: []PodProtection{PodsWithPVC},
                    Config: &PodProtectionsConfig{
                        PodsWithPVC: &PodsWithPVCConfig{
                            ProtectedStorageClasses: []ProtectedStorageClass{
                                {
                                    Name: "",
                                },
                                {
                                    Name: "protected-storage-class-0",
                                },
                                {
                                    Name: "",
                                },
                                {
                                    Name: "protected-storage-class-1",
                                },
                            },
                        },
                    },
                },
            },
            errInfo: fmt.Errorf(`[PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[0] name cannot be empty, PodProtections.Config.PodsWithPVC.ProtectedStorageClasses[2] name cannot be empty]`),
        },
    }

    for _, testCase := range tests {
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -81,6 +81,11 @@ func (in *PodProtections) DeepCopyInto(out *PodProtections) {
        *out = make([]PodProtection, len(*in))
        copy(*out, *in)
    }
    if in.Config != nil {
        in, out := &in.Config, &out.Config
        *out = new(PodProtectionsConfig)
        (*in).DeepCopyInto(*out)
    }
    return
}

@@ -93,3 +98,45 @@ func (in *PodProtections) DeepCopy() *PodProtections {
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodProtectionsConfig) DeepCopyInto(out *PodProtectionsConfig) {
    *out = *in
    if in.PodsWithPVC != nil {
        in, out := &in.PodsWithPVC, &out.PodsWithPVC
        *out = new(PodsWithPVCConfig)
        (*in).DeepCopyInto(*out)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtectionsConfig.
func (in *PodProtectionsConfig) DeepCopy() *PodProtectionsConfig {
    if in == nil {
        return nil
    }
    out := new(PodProtectionsConfig)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsWithPVCConfig) DeepCopyInto(out *PodsWithPVCConfig) {
    *out = *in
    if in.ProtectedStorageClasses != nil {
        in, out := &in.ProtectedStorageClasses, &out.ProtectedStorageClasses
        *out = make([]ProtectedStorageClass, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsWithPVCConfig.
func (in *PodsWithPVCConfig) DeepCopy() *PodsWithPVCConfig {
    if in == nil {
        return nil
    }
    out := new(PodsWithPVCConfig)
    in.DeepCopyInto(out)
    return out
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,7 +23,6 @@ import (

    v1 "k8s.io/api/core/v1"
    policy "k8s.io/api/policy/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    core "k8s.io/client-go/testing"
@@ -97,17 +96,7 @@ func TestHighNodeUtilization(t *testing.T) {
                test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
                    // A pod with local storage.
                    test.SetNormalOwnerRef(pod)
                    pod.Spec.Volumes = []v1.Volume{
                        {
                            Name: "sample",
                            VolumeSource: v1.VolumeSource{
                                HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
                                EmptyDir: &v1.EmptyDirVolumeSource{
                                    SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
                                },
                            },
                        },
                    }
                    test.SetHostPathEmptyDirVolumeSource(pod)
                    // A Mirror Pod.
                    pod.Annotations = test.GetMirrorPodAnnotation()
                }),
@@ -115,8 +104,7 @@ func TestHighNodeUtilization(t *testing.T) {
                    // A Critical Pod.
                    test.SetNormalOwnerRef(pod)
                    pod.Namespace = "kube-system"
                    priority := utils.SystemCriticalPriority
                    pod.Spec.Priority = &priority
                    test.SetPodPriority(pod, utils.SystemCriticalPriority)
                }),
                // These won't be evicted.
                test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
@@ -168,8 +156,7 @@ func TestHighNodeUtilization(t *testing.T) {
                    // A Critical Pod.
                    test.SetNormalOwnerRef(pod)
                    pod.Namespace = "kube-system"
                    priority := utils.SystemCriticalPriority
                    pod.Spec.Priority = &priority
                    test.SetPodPriority(pod, utils.SystemCriticalPriority)
                }),
                // These won't be evicted.
                test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
@@ -249,9 +236,7 @@ func TestHighNodeUtilization(t *testing.T) {
                    test.SetRSOwnerRef(pod)
                    test.MakeBestEffortPod(pod)
                }),
                test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
                    test.SetRSOwnerRef(pod)
                }),
                test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
                // These won't be evicted.
                test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
                test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
@@ -466,9 +451,7 @@ func TestHighNodeUtilization(t *testing.T) {
                // pods in the other nodes must not be evicted
                // because they do not have the extended
                // resource defined in their requests.
                test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
                    test.SetRSOwnerRef(pod)
                }),
                test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
                test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
                    test.SetRSOwnerRef(pod)
                }),
@@ -23,7 +23,6 @@ import (
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
@@ -92,25 +91,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -155,25 +143,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -233,25 +210,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -310,17 +276,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -328,8 +284,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -379,17 +334,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -397,8 +342,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -462,17 +406,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -480,8 +414,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -525,9 +458,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
@@ -537,23 +468,11 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetDSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -561,8 +480,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -632,17 +550,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -651,8 +559,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
@@ -747,12 +654,8 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
|
||||
}),
|
||||
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p3", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p8", 0, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
@@ -795,17 +698,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -813,8 +706,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
@@ -872,17 +764,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -890,8 +772,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
@@ -975,17 +856,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -993,8 +864,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1037,17 +907,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -1055,8 +915,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1106,17 +965,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -1124,8 +973,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1218,17 +1066,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
@@ -1236,8 +1074,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1282,25 +1119,14 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
@@ -1575,17 +1401,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
func withLocalStorage(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}
|
||||
@@ -1594,8 +1410,7 @@ func withCriticalPod(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}
|
||||
|
||||
func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
|
||||
|
||||
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
pkg/framework/plugins/podlifetime/README.md (new file, 155 lines)
@@ -0,0 +1,155 @@
# PodLifeTime Plugin

## What It Does

The PodLifeTime plugin evicts pods that have been running for too long. You can configure a maximum age threshold, and the plugin evicts pods older than that threshold. The oldest pods are evicted first.

## How It Works

The plugin examines all pods across your nodes and selects those that exceed the configured age threshold. You can further narrow down which pods are considered by specifying:

- Which namespaces to include or exclude
- Which labels pods must have
- Which states pods must be in (e.g., Running, Pending, CrashLoopBackOff)

Once pods are selected, they are sorted by age (oldest first) and evicted in that order. Eviction stops when limits are reached (per-node limits, total limits, or Pod Disruption Budget constraints).
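A minimal sketch combining the three selectors above; the namespace, label, and threshold values are purely illustrative:

```yaml
args:
  maxPodLifeTimeSeconds: 86400 # 1 day
  namespaces:
    include: [batch]        # illustrative namespace
  labelSelector:
    matchLabels:
      tier: worker          # illustrative label
  states: [Running]
```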
## Use Cases

- **Resource Leakage Mitigation**: Restart long-running pods that may have accumulated memory leaks, stale caches, or other leaked resources
  ```yaml
  args:
    maxPodLifeTimeSeconds: 604800 # 7 days
    states: [Running]
  ```

- **Ephemeral Workload Cleanup**: Remove long-running batch jobs, test pods, or temporary workloads that have exceeded their expected lifetime
  ```yaml
  args:
    maxPodLifeTimeSeconds: 7200 # 2 hours
    states: [Succeeded, Failed]
  ```

- **Node Hygiene**: Remove forgotten or stuck pods that are consuming resources but not making progress
  ```yaml
  args:
    maxPodLifeTimeSeconds: 3600 # 1 hour
    states: [CrashLoopBackOff, ImagePullBackOff, ErrImagePull]
    includingInitContainers: true
  ```

- **Config/Secret Update Pickup**: Force a pod restart to pick up updated ConfigMaps, Secrets, or environment variables
  ```yaml
  args:
    maxPodLifeTimeSeconds: 86400 # 1 day
    states: [Running]
    labelSelector:
      matchLabels:
        config-refresh: enabled
  ```

- **Security Rotation**: Periodically refresh pods to pick up new security tokens, certificates, or patched container images
  ```yaml
  args:
    maxPodLifeTimeSeconds: 259200 # 3 days
    states: [Running]
    namespaces:
      exclude: [kube-system]
  ```

- **Dev/Test Environment Cleanup**: Automatically clean up old pods in development or staging namespaces
  ```yaml
  args:
    maxPodLifeTimeSeconds: 86400 # 1 day
    namespaces:
      include: [dev, staging, test]
  ```

- **Cluster Health Freshness**: Ensure pods restart periodically to maintain cluster health and verify that workloads can recover from restarts
  ```yaml
  args:
    maxPodLifeTimeSeconds: 604800 # 7 days
    states: [Running]
    namespaces:
      exclude: [kube-system, production]
  ```

- **Rebalancing Assistance**: Work alongside other descheduler strategies by removing old pods to allow better pod distribution
  ```yaml
  args:
    maxPodLifeTimeSeconds: 1209600 # 14 days
    states: [Running]
  ```

- **Non-Critical Stateful Refresh**: Occasionally reset stateful workloads that can tolerate a restart, for example because they can handle data loss or have external backups
  ```yaml
  args:
    maxPodLifeTimeSeconds: 2592000 # 30 days
    labelSelector:
      matchLabels:
        stateful-tier: cache
  ```
## Configuration

| Parameter | Description | Type | Required | Default |
|-----------|-------------|------|----------|---------|
| `maxPodLifeTimeSeconds` | Pods older than this many seconds are evicted | `uint` | Yes | - |
| `namespaces` | Limit eviction to specific namespaces (or exclude specific namespaces) | `Namespaces` | No | `nil` |
| `labelSelector` | Only evict pods matching these labels | `metav1.LabelSelector` | No | `nil` |
| `states` | Only evict pods in specific states (e.g., Running, CrashLoopBackOff) | `[]string` | No | `nil` |
| `includingInitContainers` | When checking states, also check init container states | `bool` | No | `false` |
| `includingEphemeralContainers` | When checking states, also check ephemeral container states | `bool` | No | `false` |
### Discovering states

To discover a pod's relevant state, the following locations are checked on each pod:

1. **Pod Phase** - The overall pod lifecycle phase:
   - `Running` - Pod is running on a node
   - `Pending` - Pod has been accepted but containers are not yet running
   - `Succeeded` - All containers terminated successfully
   - `Failed` - All containers terminated, at least one failed
   - `Unknown` - Pod state cannot be determined

2. **Pod Status Reason** - Why the pod is in its current state:
   - `NodeAffinity` - Pod cannot be scheduled due to node affinity rules
   - `NodeLost` - Node hosting the pod is lost
   - `Shutdown` - Pod terminated due to node shutdown
   - `UnexpectedAdmissionError` - Pod admission failed unexpectedly

3. **Container Waiting Reason** - Why containers are waiting to start:
   - `PodInitializing` - Pod is still initializing
   - `ContainerCreating` - Container is being created
   - `ImagePullBackOff` - Image pull is failing and backing off
   - `CrashLoopBackOff` - Container is crashing repeatedly
   - `CreateContainerConfigError` - Container configuration is invalid
   - `ErrImagePull` - Image cannot be pulled
   - `CreateContainerError` - Container creation failed
   - `InvalidImageName` - Image name is invalid

By default, only regular containers are checked. Enable `includingInitContainers` or `includingEphemeralContainers` to also check those container types.
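As a sketch, the following args target pods that are stuck before startup by combining a pod phase with container waiting reasons (assuming, as the lists above suggest, that both kinds of values may appear in the same `states` list); the threshold is illustrative:

```yaml
args:
  maxPodLifeTimeSeconds: 1800 # 30 minutes
  states: [Pending, ContainerCreating, PodInitializing]
  includingInitContainers: true
```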
## Example

```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
  - name: default
    plugins:
      deschedule:
        enabled:
          - name: PodLifeTime
    pluginConfig:
      - name: PodLifeTime
        args:
          maxPodLifeTimeSeconds: 86400 # 1 day
          namespaces:
            include:
              - default
          states:
            - Running
```

This configuration evicts Running pods in the `default` namespace that are older than 1 day.
File diff suppressed because it is too large
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@ import (
    "testing"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
@@ -33,8 +32,25 @@ import (
    "sigs.k8s.io/descheduler/test"
)

func buildTestPodWithImage(podName, node, image string) *v1.Pod {
    pod := test.BuildTestPod(podName, 100, 0, node, test.SetRSOwnerRef)
const (
    nodeName1 = "n1"
    nodeName2 = "n2"
    nodeName3 = "n3"
    nodeName4 = "n4"
    nodeName5 = "n5"
    nodeName6 = "n6"
)

func buildTestNode(nodeName string, apply func(*v1.Node)) *v1.Node {
    return test.BuildTestNode(nodeName, 2000, 3000, 10, apply)
}

func buildTestPodForNode(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
    return test.BuildTestPod(name, 100, 0, nodeName, apply)
}

func buildTestPodWithImage(podName, image string) *v1.Pod {
    pod := buildTestPodForNode(podName, nodeName1, test.SetRSOwnerRef)
    pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
        Name:  image,
        Image: image,
@@ -42,144 +58,25 @@ func buildTestPodWithImage(podName, node, image string) *v1.Pod {
    return pod
}

func buildTestPodWithRSOwnerRefForNode1(name string, apply func(*v1.Pod)) *v1.Pod {
    return buildTestPodForNode(name, nodeName1, func(pod *v1.Pod) {
        test.SetRSOwnerRef(pod)
        if apply != nil {
            apply(pod)
        }
    })
}

func buildTestPodWithRSOwnerRefWithNamespaceForNode1(name, namespace string, apply func(*v1.Pod)) *v1.Pod {
    return buildTestPodWithRSOwnerRefForNode1(name, func(pod *v1.Pod) {
        pod.Namespace = namespace
        if apply != nil {
            apply(pod)
        }
    })
}
func TestFindDuplicatePods(t *testing.T) {
|
||||
// first setup pods
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "hardware",
|
||||
Value: "gpu",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
node5 := test.BuildTestNode("n5", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
node6 := test.BuildTestNode("n6", 200, 200, 10, nil)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p1.Namespace = "dev"
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p2.Namespace = "dev"
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
p3.Namespace = "dev"
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
|
||||
p7.Namespace = "kube-system"
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||
p8.Namespace = "test"
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
|
||||
p9.Namespace = "test"
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
|
||||
p10.Namespace = "test"
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node1.Name, nil)
|
||||
p11.Namespace = "different-images"
|
||||
p12 := test.BuildTestPod("p12", 100, 0, node1.Name, nil)
|
||||
p12.Namespace = "different-images"
|
||||
p13 := test.BuildTestPod("p13", 100, 0, node1.Name, nil)
|
||||
p13.Namespace = "different-images"
|
||||
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
|
||||
p14.Namespace = "different-images"
|
||||
p15 := test.BuildTestPod("p15", 100, 0, node1.Name, nil)
|
||||
p15.Namespace = "node-fit"
|
||||
p16 := test.BuildTestPod("NOT1", 100, 0, node1.Name, nil)
|
||||
p16.Namespace = "node-fit"
|
||||
p17 := test.BuildTestPod("NOT2", 100, 0, node1.Name, nil)
|
||||
p17.Namespace = "node-fit"
|
||||
p18 := test.BuildTestPod("TARGET", 100, 0, node1.Name, nil)
|
||||
p18.Namespace = "node-fit"
|
||||
|
||||
// This pod sits on node6 and is used to take up CPU requests on the node
|
||||
p19 := test.BuildTestPod("CPU-eater", 150, 150, node6.Name, nil)
|
||||
p19.Namespace = "test"
|
||||
|
||||
// Dummy pod for node6 used to do the opposite of p19
|
||||
p20 := test.BuildTestPod("CPU-saver", 100, 150, node6.Name, nil)
|
||||
p20.Namespace = "test"
|
||||
|
||||
// ### Evictable Pods ###
|
||||
|
||||
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
p1.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p2.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p3.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
// Three Pods in the "test" Namespace, bound to same ReplicaSet. 2 should be evicted.
|
||||
ownerRef2 := test.GetReplicaSetOwnerRefList()
|
||||
p8.ObjectMeta.OwnerReferences = ownerRef2
|
||||
p9.ObjectMeta.OwnerReferences = ownerRef2
|
||||
p10.ObjectMeta.OwnerReferences = ownerRef2
|
||||
|
||||
// ### Non-evictable Pods ###
|
||||
|
||||
// A DaemonSet.
|
||||
p4.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
|
||||
// A Pod with local storage.
|
||||
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p5.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// A Mirror Pod.
|
||||
p6.Annotations = test.GetMirrorPodAnnotation()
|
||||
|
||||
// A Critical Pod.
|
||||
priority := utils.SystemCriticalPriority
|
||||
p7.Spec.Priority = &priority
|
||||
|
||||
// Same owners, but different images
|
||||
p11.Spec.Containers[0].Image = "foo"
|
||||
p11.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p12.Spec.Containers[0].Image = "bar"
|
||||
p12.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
// Multiple containers
|
||||
p13.ObjectMeta.OwnerReferences = ownerRef1
|
||||
p13.Spec.Containers = append(p13.Spec.Containers, v1.Container{
|
||||
Name: "foo",
|
||||
Image: "foo",
|
||||
})
|
||||
|
||||
// ### Pods Evictable Based On Node Fit ###
|
||||
|
||||
ownerRef3 := test.GetReplicaSetOwnerRefList()
|
||||
p15.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p16.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p17.ObjectMeta.OwnerReferences = ownerRef3
|
||||
p18.ObjectMeta.OwnerReferences = ownerRef3
|
||||
|
||||
p15.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
p16.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
p17.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
|
||||
testCases := []struct {
description string
pods []*v1.Pod
@@ -189,92 +86,263 @@ func TestFindDuplicatePods(t *testing.T) {
nodefit bool
}{
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2},
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node2},
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
excludeOwnerKinds: []string{"ReplicaSet"},
},
{
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{p8, p9, p10},
nodes: []*v1.Node{node1, node2},
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p8", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p9", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p10", "test", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p8, p9, p10},
nodes: []*v1.Node{node1, node2},
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p8", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p9", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p10", "test", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 2,
},
{
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
pods: []*v1.Pod{p4, p5, p6, p7},
nodes: []*v1.Node{node1, node2},
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
pods: []*v1.Pod{
buildTestPodForNode("p4", nodeName1, test.SetDSOwnerRef),
buildTestPodForNode("p5", nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
}),
buildTestPodForNode("p6", nodeName1, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
}),
buildTestPodForNode("p7", nodeName1, func(pod *v1.Pod) {
pod.Namespace = "kube-system"
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Test all Pods: 4 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p4, p5, p6, p7, p8, p9, p10},
nodes: []*v1.Node{node1, node2},
description: "Test all Pods: 4 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
buildTestPodForNode("p4", nodeName1, test.SetDSOwnerRef),
buildTestPodForNode("p5", nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
}),
buildTestPodForNode("p6", nodeName1, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
}),
buildTestPodForNode("p7", nodeName1, func(pod *v1.Pod) {
pod.Namespace = "kube-system"
test.SetPodPriority(pod, utils.SystemCriticalPriority)
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p8", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p9", "test", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p10", "test", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 2,
},
{
description: "Pods with the same owner but different images should not be evicted",
pods: []*v1.Pod{p11, p12},
nodes: []*v1.Node{node1, node2},
description: "Pods with the same owner but different images should not be evicted",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p11", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "foo"
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p12", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "bar"
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Pods with multiple containers should not match themselves",
pods: []*v1.Pod{p13},
nodes: []*v1.Node{node1, node2},
description: "Pods with multiple containers should not match themselves",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p13", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
pods: []*v1.Pod{p11, p13},
nodes: []*v1.Node{node1, node2},
description: "Pods with matching ownerrefs and at not all matching image should not trigger an eviction",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p11", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "foo"
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p13", "different-images", func(pod *v1.Pod) {
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "foo",
Image: "foo",
})
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node3},
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p15, p16, p17},
nodes: []*v1.Node{node1, node4},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p15", "node-fit", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("NOT1", "node-fit", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("NOT2", "node-fit", func(pod *v1.Pod) {
pod.Spec.NodeSelector = map[string]string{
"datacenter": "west",
}
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName4, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
"datacenter": "east",
}
}),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
nodes: []*v1.Node{node1, node5},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p19},
nodes: []*v1.Node{node1, node6},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
test.BuildTestPod("CPU-eater", 150, 150, nodeName6, func(pod *v1.Pod) {
pod.Namespace = "test"
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName6, 200, 200, 10, nil),
},
expectedEvictedPodCount: 0,
nodefit: true,
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p20},
nodes: []*v1.Node{node1, node6},
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
pods: []*v1.Pod{
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p1", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p2", "dev", nil),
buildTestPodWithRSOwnerRefWithNamespaceForNode1("p3", "dev", nil),
test.BuildTestPod("CPU-saver", 100, 150, nodeName6, func(pod *v1.Pod) {
pod.Namespace = "test"
}),
},
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName6, 200, 200, 10, nil),
},
expectedEvictedPodCount: 1,
nodefit: true,
},
@@ -330,26 +398,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
}
}

setTolerationsK1 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Tolerations = []v1.Toleration{
{
Key: "k1",
Value: "v1",
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
},
}
}
setTolerationsK2 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Tolerations = []v1.Toleration{
{
Key: "k2",
Value: "v2",
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
},
setNoScheduleTolerations := func(key, value string) func(*v1.Pod) {
return func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Tolerations = []v1.Toleration{
{
Key: key,
Value: value,
Operator: v1.TolerationOpEqual,
Effect: v1.TaintEffectNoSchedule,
},
}
}
}

@@ -377,70 +436,42 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
node.ObjectMeta.Labels["node-role.kubernetes.io/worker"] = "k2"
}

setNotMasterNodeSelectorK1 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
Key: "k1",
Operator: v1.NodeSelectorOpDoesNotExist,
setNotMasterNodeSelector := func(key string) func(*v1.Pod) {
return func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
Key: key,
Operator: v1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
}
}
}

setNotMasterNodeSelectorK2 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
pod.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "node-role.kubernetes.io/control-plane",
Operator: v1.NodeSelectorOpDoesNotExist,
},
{
Key: "k2",
Operator: v1.NodeSelectorOpDoesNotExist,
},
},
},
},
},
},
setWorkerLabelSelector := func(value string) func(*v1.Pod) {
return func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = map[string]string{}
}
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = value
}
}

setWorkerLabelSelectorK1 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = map[string]string{}
}
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = "k1"
}

setWorkerLabelSelectorK2 := func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
if pod.Spec.NodeSelector == nil {
pod.Spec.NodeSelector = map[string]string{}
}
pod.Spec.NodeSelector["node-role.kubernetes.io/worker"] = "k2"
}

testCases := []struct {
description string
pods []*v1.Pod
@@ -451,107 +482,107 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
description: "Evict pods uniformly",
pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p2", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p3", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p4", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p5", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p6", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p7", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p8", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p9", "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods uniformly with one node left out",
pods: []*v1.Pod{
// (5,3,1) -> (4,4,1) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p2", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p3", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p4", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p5", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p6", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p7", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p8", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p9", "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
},
},
{
description: "Evict pods uniformly with two replica sets",
pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
buildTestPodForNode("p11", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p12", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p13", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p14", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p15", "n1", setTwoRSOwnerRef),
buildTestPodForNode("p16", "n2", setTwoRSOwnerRef),
buildTestPodForNode("p17", "n2", setTwoRSOwnerRef),
buildTestPodForNode("p18", "n2", setTwoRSOwnerRef),
buildTestPodForNode("p19", "n3", setTwoRSOwnerRef),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods uniformly with two owner references",
pods: []*v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
buildTestPodForNode("p11", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p12", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p13", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p14", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p15", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p16", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p17", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p18", "n2", test.SetRSOwnerRef),
buildTestPodForNode("p19", "n3", test.SetRSOwnerRef),
// (1,3,5) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
buildTestPodForNode("p21", "n1", setRSOwnerRef2),
buildTestPodForNode("p22", "n2", setRSOwnerRef2),
buildTestPodForNode("p23", "n2", setRSOwnerRef2),
buildTestPodForNode("p24", "n2", setRSOwnerRef2),
buildTestPodForNode("p25", "n3", setRSOwnerRef2),
buildTestPodForNode("p26", "n3", setRSOwnerRef2),
buildTestPodForNode("p27", "n3", setRSOwnerRef2),
buildTestPodForNode("p28", "n3", setRSOwnerRef2),
buildTestPodForNode("p29", "n3", setRSOwnerRef2),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods with number of pods less than nodes",
pods: []*v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
buildTestPodForNode("p2", "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
@@ -560,125 +591,125 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
buildTestPodWithImage("p1", "n1", "foo"),
buildTestPodWithImage("p2", "n1", "bar"),
buildTestPodWithImage("p3", "n1", "baz"),
buildTestPodWithImage("p4", "n1", "foo"),
buildTestPodWithImage("p1", "foo"),
buildTestPodWithImage("p2", "bar"),
buildTestPodWithImage("p3", "baz"),
buildTestPodWithImage("p4", "foo"),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods with a single pod with three nodes",
pods: []*v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
buildTestPodForNode("p1", "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, nil),
test.BuildTestNode("n2", 2000, 3000, 10, nil),
test.BuildTestNode("n3", 2000, 3000, 10, nil),
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, nil),
buildTestNode(nodeName3, nil),
},
},
{
description: "Evict pods uniformly respecting taints",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
buildTestPodForNode("p1", "worker1", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p2", "worker1", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p3", "worker1", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p4", "worker1", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p5", "worker1", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p6", "worker2", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p7", "worker2", setNoScheduleTolerations("k1", "v1")),
buildTestPodForNode("p8", "worker2", setNoScheduleTolerations("k2", "v2")),
buildTestPodForNode("p9", "worker3", setNoScheduleTolerations("k1", "v1")),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
test.BuildTestNode("master1", 2000, 3000, 10, setMasterNoScheduleTaint),
test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleTaint),
test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleTaint),
buildTestNode("worker1", nil),
buildTestNode("worker2", nil),
buildTestNode("worker3", nil),
buildTestNode("master1", setMasterNoScheduleTaint),
buildTestNode("master2", setMasterNoScheduleTaint),
buildTestNode("master3", setMasterNoScheduleTaint),
},
},
{
description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
buildTestPodForNode("p1", "worker1", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p2", "worker1", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p3", "worker1", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p4", "worker1", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p5", "worker1", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p6", "worker2", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p7", "worker2", setNotMasterNodeSelector("k1")),
buildTestPodForNode("p8", "worker2", setNotMasterNodeSelector("k2")),
buildTestPodForNode("p9", "worker3", setNotMasterNodeSelector("k1")),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
test.BuildTestNode("master1", 2000, 3000, 10, setMasterNoScheduleLabel),
test.BuildTestNode("master2", 2000, 3000, 10, setMasterNoScheduleLabel),
test.BuildTestNode("master3", 2000, 3000, 10, setMasterNoScheduleLabel),
buildTestNode("worker1", nil),
buildTestNode("worker2", nil),
buildTestNode("worker3", nil),
buildTestNode("master1", setMasterNoScheduleLabel),
buildTestNode("master2", setMasterNoScheduleLabel),
buildTestNode("master3", setMasterNoScheduleLabel),
},
},
{
description: "Evict pods uniformly respecting node selector",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
buildTestPodForNode("p1", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p2", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p3", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p4", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p5", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p6", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p7", "worker2", setWorkerLabelSelector("k1")),
buildTestPodForNode("p8", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p9", "worker3", setWorkerLabelSelector("k1")),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, setWorkerLabel),
test.BuildTestNode("worker2", 2000, 3000, 10, setWorkerLabel),
test.BuildTestNode("worker3", 2000, 3000, 10, setWorkerLabel),
test.BuildTestNode("master1", 2000, 3000, 10, nil),
test.BuildTestNode("master2", 2000, 3000, 10, nil),
test.BuildTestNode("master3", 2000, 3000, 10, nil),
buildTestNode("worker1", setWorkerLabel),
buildTestNode("worker2", setWorkerLabel),
buildTestNode("worker3", setWorkerLabel),
buildTestNode("master1", nil),
buildTestNode("master2", nil),
buildTestNode("master3", nil),
},
},
{
description: "Evict pods uniformly respecting node selector with zero target nodes",
pods: []*v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
buildTestPodForNode("p1", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p2", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p3", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p4", "worker1", setWorkerLabelSelector("k2")),
buildTestPodForNode("p5", "worker1", setWorkerLabelSelector("k1")),
buildTestPodForNode("p6", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p7", "worker2", setWorkerLabelSelector("k1")),
buildTestPodForNode("p8", "worker2", setWorkerLabelSelector("k2")),
buildTestPodForNode("p9", "worker3", setWorkerLabelSelector("k1")),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
test.BuildTestNode("worker1", 2000, 3000, 10, nil),
test.BuildTestNode("worker2", 2000, 3000, 10, nil),
test.BuildTestNode("worker3", 2000, 3000, 10, nil),
test.BuildTestNode("master1", 2000, 3000, 10, nil),
test.BuildTestNode("master2", 2000, 3000, 10, nil),
test.BuildTestNode("master3", 2000, 3000, 10, nil),
buildTestNode("worker1", nil),
buildTestNode("worker2", nil),
buildTestNode("worker3", nil),
buildTestNode("master1", nil),
buildTestNode("master2", nil),
buildTestNode("master3", nil),
},
},
}

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -22,7 +22,6 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"

@@ -33,73 +32,93 @@ import (
"sigs.k8s.io/descheduler/test"
)

func initPods(node *v1.Node) []*v1.Pod {
pods := make([]*v1.Pod, 0)
const (
nodeName1 = "node1"
nodeName2 = "node2"
nodeName3 = "node3"
nodeName4 = "node4"
nodeName5 = "node5"
)

for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
func buildTestNode(nodeName string, apply func(*v1.Node)) *v1.Node {
return test.BuildTestNode(nodeName, 2000, 3000, 10, apply)
}

// pod at index i will have 25 * i restarts.
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * i,
},
func setPodContainerStatusRestartCount(pod *v1.Pod, base int32) {
pod.Status = v1.PodStatus{
InitContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 5 * base,
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * i,
},
{
RestartCount: 10 * i,
},
},
ContainerStatuses: []v1.ContainerStatus{
{
RestartCount: 10 * base,
},
}
pods = append(pods, pod)
}

// The following 3 pods won't get evicted.
// A daemonset.
pods[6].ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
// A pod with local storage.
pods[7].ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pods[7].Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
{
RestartCount: 10 * base,
},
},
}
// A Mirror Pod.
pods[8].Annotations = test.GetMirrorPodAnnotation()
}

func initPodContainersWithStatusRestartCount(name string, base int32, apply func(pod *v1.Pod)) *v1.Pod {
return test.BuildTestPod(name, 100, 0, nodeName1, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
// pod at index i will have 25 * i restarts, 5 for init container, 20 for other two containers
setPodContainerStatusRestartCount(pod, base)
if apply != nil {
apply(pod)
}
})
}

func initPods(apply func(pod *v1.Pod)) []*v1.Pod {
pods := make([]*v1.Pod, 0)

for i := int32(0); i <= 9; i++ {
switch i {
default:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, apply))
// The following 3 pods won't get evicted.
// A daemonset.
case 6:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
if apply != nil {
apply(pod)
}
}))
// A pod with local storage.
case 7:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
test.SetNormalOwnerRef(pod)
test.SetHostPathEmptyDirVolumeSource(pod)
if apply != nil {
apply(pod)
}
}))
// A Mirror Pod.
case 8:
pods = append(pods, initPodContainersWithStatusRestartCount(fmt.Sprintf("pod-%d", i), i, func(pod *v1.Pod) {
pod.Annotations = test.GetMirrorPodAnnotation()
if apply != nil {
apply(pod)
}
}))
}
}

pods = append(
pods,
test.BuildTestPod("CPU-consumer-1", 150, 100, nodeName4, test.SetNormalOwnerRef),
test.BuildTestPod("CPU-consumer-2", 150, 100, nodeName5, test.SetNormalOwnerRef),
)

return pods
}

func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
})
node3 := test.BuildTestNode("node3", 2000, 3000, 10, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
})
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)

createRemovePodsHavingTooManyRestartsAgrs := func(
podRestartThresholds int32,
includingInitContainers bool,
@@ -114,207 +133,261 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {

tests := []struct {
description string
pods []*v1.Pod
nodes []*v1.Node
args RemovePodsHavingTooManyRestartsArgs
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
nodeFit bool
applyFunc func([]*v1.Pod)
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts under threshold, no pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(10000, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "Some pods have total restarts bigger than threshold",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*25, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 5,
},
{
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
nodes: []*v1.Node{node1},
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1*20, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 6,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*25+1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
nodes: []*v1.Node{node1},
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
args: createRemovePodsHavingTooManyRestartsAgrs(5*20+1, false),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 1,
nodeFit: false,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1},
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node2},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tainted, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName2, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "hardware",
Value: "gpu",
Effect: v1.TaintEffectNoSchedule,
},
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node3},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName3, func(node *v1.Node) {
node.Spec = v1.NodeSpec{
Unschedulable: true,
}
}),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node4},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
test.BuildTestNode(nodeName4, 200, 3000, 10, nil),
},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
nodes: []*v1.Node{node1, node5},
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
args: createRemovePodsHavingTooManyRestartsAgrs(1, true),
pods: initPods(nil),
nodes: []*v1.Node{
buildTestNode(nodeName1, nil),
buildTestNode(nodeName5, nil),
},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
nodeFit: true,
},
{
|
||||
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
description: "pods are in CrashLoopBackOff with states=CrashLoopBackOff, 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
|
||||
pods: initPods(func(pod *v1.Pod) {
|
||||
if len(pod.Status.ContainerStatuses) > 0 {
|
||||
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
}
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, nil),
|
||||
buildTestNode(nodeName5, nil),
|
||||
},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
nodeFit: true,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
if len(pod.Status.ContainerStatuses) > 0 {
|
||||
pod.Status.ContainerStatuses[0].State = v1.ContainerState{
|
||||
},
|
||||
{
|
||||
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
|
||||
pods: initPods(nil),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, nil),
|
||||
buildTestNode(nodeName5, nil),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "pods running with state=Running, 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
|
||||
pods: initPods(func(pod *v1.Pod) {
|
||||
pod.Status.Phase = v1.PodRunning
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, nil),
|
||||
},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
},
|
||||
{
|
||||
description: "pods pending with state=Running, 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
|
||||
pods: initPods(func(pod *v1.Pod) {
|
||||
pod.Status.Phase = v1.PodPending
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, nil),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
},
|
||||
{
|
||||
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
|
||||
pods: initPods(func(pod *v1.Pod) {
|
||||
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pods without CrashLoopBackOff with states=CrashLoopBackOff, 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "pods running with state=Running, 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
pod.Status.Phase = v1.PodRunning
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pods pending with state=Running, 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{string(v1.PodRunning)}},
|
||||
nodes: []*v1.Node{node1},
|
||||
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
|
||||
pods: initPods(func(pod *v1.Pod) {
|
||||
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, nil),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
pod.Status.Phase = v1.PodPending
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=true), 3 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: true},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pods pending with initContainer with states=CrashLoopBackOff threshold(includingInitContainers=false), 0 pod evictions",
|
||||
args: RemovePodsHavingTooManyRestartsArgs{PodRestartThreshold: 1, States: []string{"CrashLoopBackOff"}, IncludingInitContainers: false},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
applyFunc: func(pods []*v1.Pod) {
|
||||
for _, pod := range pods {
|
||||
pod.Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
pods := append(
|
||||
initPods(node1),
|
||||
test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, test.SetNormalOwnerRef),
|
||||
test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, test.SetNormalOwnerRef),
|
||||
)
|
||||
if tc.applyFunc != nil {
|
||||
tc.applyFunc(pods)
|
||||
}
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
@@ -322,7 +395,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
for _, node := range tc.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range pods {
|
||||
for _, pod := range tc.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -33,84 +33,75 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func TestPodAntiAffinity(t *testing.T) {
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
})
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
|
||||
node5 := test.BuildTestNode("n5", 200, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
})
|
||||
const (
|
||||
nodeName1 = "n1"
|
||||
nodeName2 = "n2"
|
||||
nodeName3 = "n3"
|
||||
nodeName4 = "n4"
|
||||
nodeName5 = "n5"
|
||||
)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node5.Name, nil)
|
||||
p9.DeletionTimestamp = &metav1.Time{}
|
||||
p10.DeletionTimestamp = &metav1.Time{}
|
||||
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
|
||||
return test.BuildTestNode(name, 2000, 3000, 10, apply)
|
||||
}
|
||||
|
||||
criticalPriority := utils.SystemCriticalPriority
|
||||
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.Priority = &criticalPriority
|
||||
})
|
||||
p2.Labels = map[string]string{"foo": "bar"}
|
||||
p5.Labels = map[string]string{"foo": "bar"}
|
||||
p6.Labels = map[string]string{"foo": "bar"}
|
||||
p7.Labels = map[string]string{"foo1": "bar1"}
|
||||
p11.Labels = map[string]string{"foo": "bar"}
|
||||
nonEvictablePod.Labels = map[string]string{"foo": "bar"}
|
||||
test.SetNormalOwnerRef(p1)
|
||||
test.SetNormalOwnerRef(p2)
|
||||
test.SetNormalOwnerRef(p3)
|
||||
test.SetNormalOwnerRef(p4)
|
||||
test.SetNormalOwnerRef(p5)
|
||||
test.SetNormalOwnerRef(p6)
|
||||
test.SetNormalOwnerRef(p7)
|
||||
test.SetNormalOwnerRef(p9)
|
||||
test.SetNormalOwnerRef(p10)
|
||||
test.SetNormalOwnerRef(p11)
|
||||
|
||||
// set pod anti affinity
|
||||
test.SetPodAntiAffinity(p1, "foo", "bar")
|
||||
test.SetPodAntiAffinity(p3, "foo", "bar")
|
||||
test.SetPodAntiAffinity(p4, "foo", "bar")
|
||||
test.SetPodAntiAffinity(p5, "foo1", "bar1")
|
||||
test.SetPodAntiAffinity(p6, "foo1", "bar1")
|
||||
test.SetPodAntiAffinity(p7, "foo", "bar")
|
||||
test.SetPodAntiAffinity(p9, "foo", "bar")
|
||||
test.SetPodAntiAffinity(p10, "foo", "bar")
|
||||
|
||||
// set pod priority
|
||||
test.SetPodPriority(p5, 100)
|
||||
test.SetPodPriority(p6, 50)
|
||||
test.SetPodPriority(p7, 0)
|
||||
|
||||
// Set pod node selectors
|
||||
p8.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
func setNodeMainRegionLabel(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
}
|
||||
|
||||
func buildTestNode1() *v1.Node {
|
||||
return buildTestNode(nodeName1, setNodeMainRegionLabel)
|
||||
}
|
||||
|
||||
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
|
||||
return test.BuildTestPod(name, 100, 0, nodeName, apply)
|
||||
}
|
||||
|
||||
func buildTestPodForNode1(name string, apply func(*v1.Pod)) *v1.Pod {
|
||||
return buildTestPod(name, nodeName1, apply)
|
||||
}
|
||||
|
||||
func setPodAntiAffinityFooBar(pod *v1.Pod) {
|
||||
test.SetPodAntiAffinity(pod, "foo", "bar")
|
||||
}
|
||||
|
||||
func setPodAntiAffinityFoo1Bar1(pod *v1.Pod) {
|
||||
test.SetPodAntiAffinity(pod, "foo1", "bar1")
|
||||
}
|
||||
|
||||
func setLabelsFooBar(pod *v1.Pod) {
|
||||
pod.Labels = map[string]string{"foo": "bar"}
|
||||
}
|
||||
|
||||
func setLabelsFoo1Bar1(pod *v1.Pod) {
|
||||
pod.Labels = map[string]string{"foo1": "bar1"}
|
||||
}
|
||||
|
||||
func buildTestPodWithAntiAffinityForNode1(name string) *v1.Pod {
|
||||
return buildTestPodForNode1(name, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setPodAntiAffinityFooBar(pod)
|
||||
})
|
||||
}
|
||||
|
||||
func buildTestPodP2ForNode1() *v1.Pod {
|
||||
return buildTestPodForNode1("p2", func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setLabelsFooBar(pod)
|
||||
})
|
||||
}
|
||||
|
||||
func buildTestPodNonEvictableForNode1() *v1.Pod {
|
||||
criticalPriority := utils.SystemCriticalPriority
|
||||
return buildTestPodForNode1("non-evict", func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, criticalPriority)
|
||||
setLabelsFooBar(pod)
|
||||
})
|
||||
}
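Note: the anti-affinity helpers above delegate to test.SetPodAntiAffinity(pod, key, value). As a rough sketch only, this is the kind of required anti-affinity term such a helper presumably attaches; the repository's actual helper may differ in selector shape or topology key.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setPodAntiAffinity (illustrative stand-in for test.SetPodAntiAffinity) makes the
// pod repel any pod labelled key=value within the same topology domain.
func setPodAntiAffinity(pod *v1.Pod, key, value string) {
	pod.Spec.Affinity = &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{key: value},
					},
					TopologyKey: "kubernetes.io/hostname", // assumed topology key
				},
			},
		},
	}
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1"}}
	setPodAntiAffinity(pod, "foo", "bar")
	fmt.Println(pod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].TopologyKey)
}
```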
func TestPodAntiAffinity(t *testing.T) {
|
||||
var uint1 uint = 1
|
||||
var uint3 uint = 3
@@ -125,87 +116,204 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
nodes []*v1.Node
|
||||
}{
|
||||
{
|
||||
description: "Maximum pods to evict - 0",
|
||||
pods: []*v1.Pod{p1, p2, p3, p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
description: "Maximum pods to evict - 0",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodP2ForNode1(),
|
||||
buildTestPodWithAntiAffinityForNode1("p3"),
|
||||
buildTestPodWithAntiAffinityForNode1("p4"),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Maximum pods to evict - 3",
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
pods: []*v1.Pod{p1, p2, p3, p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
description: "Maximum pods to evict - 3",
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodP2ForNode1(),
|
||||
buildTestPodWithAntiAffinityForNode1("p3"),
|
||||
buildTestPodWithAntiAffinityForNode1("p4"),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
|
||||
maxNoOfPodsToEvictPerNamespace: &uint3,
|
||||
pods: []*v1.Pod{p1, p2, p3, p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 3,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodP2ForNode1(),
|
||||
buildTestPodWithAntiAffinityForNode1("p3"),
|
||||
buildTestPodWithAntiAffinityForNode1("p4"),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 3,
|
||||
},
|
||||
{
|
||||
description: "Maximum pods to evict (maxNoOfPodsToEvictTotal)",
|
||||
maxNoOfPodsToEvictPerNamespace: &uint3,
|
||||
maxNoOfPodsToEvictTotal: &uint1,
|
||||
pods: []*v1.Pod{p1, p2, p3, p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Evict only 1 pod after sorting",
|
||||
pods: []*v1.Pod{p5, p6, p7},
|
||||
nodes: []*v1.Node{node1},
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodP2ForNode1(),
|
||||
buildTestPodWithAntiAffinityForNode1("p3"),
|
||||
buildTestPodWithAntiAffinityForNode1("p4"),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{p1, nonEvictablePod},
|
||||
nodes: []*v1.Node{node1},
|
||||
description: "Evict only 1 pod after sorting",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodForNode1("p5", func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setLabelsFooBar(pod)
|
||||
setPodAntiAffinityFoo1Bar1(pod)
|
||||
test.SetPodPriority(pod, 100)
|
||||
}),
|
||||
buildTestPodForNode1("p6", func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setLabelsFooBar(pod)
|
||||
setPodAntiAffinityFoo1Bar1(pod)
|
||||
test.SetPodPriority(pod, 50)
|
||||
}),
|
||||
buildTestPodForNode1("p7", func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setLabelsFoo1Bar1(pod)
|
||||
setPodAntiAffinityFooBar(pod)
|
||||
test.SetPodPriority(pod, 0)
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{p1, nonEvictablePod},
|
||||
nodes: []*v1.Node{node1},
|
||||
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodNonEvictableForNode1(),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because node selectors don't match available nodes",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{p8, nonEvictablePod},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodNonEvictableForNode1(),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because node selectors don't match available nodes",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodForNode1("p8", func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
}),
|
||||
buildTestPodNonEvictableForNode1(),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
buildTestNode(nodeName2, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because only other node is not schedulable",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{p8, nonEvictablePod},
|
||||
nodes: []*v1.Node{node1, node3},
|
||||
description: "Won't evict pods because only other node is not schedulable",
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodForNode1("p8", func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
}),
|
||||
buildTestPodNonEvictableForNode1(),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
buildTestNode(nodeName3, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "No pod to evicted since all pod terminating",
|
||||
pods: []*v1.Pod{p9, p10},
|
||||
nodes: []*v1.Node{node1},
|
||||
description: "No pod to evicted since all pod terminating",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodForNode1("p9", func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setPodAntiAffinityFooBar(pod)
|
||||
pod.DeletionTimestamp = &metav1.Time{}
|
||||
}),
|
||||
buildTestPodForNode1("p10", func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setPodAntiAffinityFooBar(pod)
|
||||
pod.DeletionTimestamp = &metav1.Time{}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because only other node doesn't have enough resources",
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
pods: []*v1.Pod{p1, p2, p3, p4},
|
||||
nodes: []*v1.Node{node1, node4},
|
||||
description: "Won't evict pods because only other node doesn't have enough resources",
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPodP2ForNode1(),
|
||||
buildTestPodWithAntiAffinityForNode1("p3"),
|
||||
buildTestPodWithAntiAffinityForNode1("p4"),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
test.BuildTestNode(nodeName4, 2, 2, 1, nil),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
|
||||
pods: []*v1.Pod{p1, p11},
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
description: "Evict pod violating anti-affinity among different node (all pods have anti-affinity)",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithAntiAffinityForNode1("p1"),
|
||||
buildTestPod("p11", nodeName5, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
setLabelsFooBar(pod)
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode1(),
|
||||
test.BuildTestNode(nodeName5, 200, 3000, 10, setNodeMainRegionLabel),
|
||||
},
|
||||
expectedEvictedPodCount: 1,
|
||||
nodeFit: false,
|
||||
},
|
||||
|
||||
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -32,73 +32,91 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const (
|
||||
nodeWithLabelsName = "nodeWithLabels"
|
||||
nodeWithoutLabelsName = "nodeWithoutLabels"
|
||||
unschedulableNodeWithLabelsName = "unschedulableNodeWithLabels"
|
||||
nodeLabelKey = "kubernetes.io/desiredNode"
|
||||
nodeLabelValue = "yes"
|
||||
)
|
||||
|
||||
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
|
||||
return test.BuildTestNode(name, 2000, 3000, 10, apply)
|
||||
}
|
||||
|
||||
func setNodeDesiredNodeLabel(node *v1.Node) {
|
||||
node.Labels[nodeLabelKey] = nodeLabelValue
|
||||
}
|
||||
|
||||
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
|
||||
return test.BuildTestPod(name, 100, 0, nodeName, apply)
|
||||
}
|
||||
|
||||
func buildUnschedulableNodeWithLabels() *v1.Node {
|
||||
return buildTestNode(unschedulableNodeWithLabelsName, func(node *v1.Node) {
|
||||
setNodeDesiredNodeLabel(node)
|
||||
node.Spec.Unschedulable = true
|
||||
})
|
||||
}
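Note: the hunks below interleave the old and new bodies of addPodsToNode, which makes the affinity construction hard to follow. For reference, here is a self-contained sketch of the required node-affinity term both versions build; the requiredNodeAffinity wrapper name is illustrative, not part of the repository.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// requiredNodeAffinity builds the requiredDuringSchedulingIgnoredDuringExecution
// term the test pods carry: the pod must land on a node labelled key=value.
func requiredNodeAffinity(key, value string) *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{Key: key, Operator: v1.NodeSelectorOpIn, Values: []string{value}},
						},
					},
				},
			},
		},
	}
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podWithNodeAffinity"}}
	pod.Spec.Affinity = requiredNodeAffinity("kubernetes.io/desiredNode", "yes")
	fmt.Println(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)
}
```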
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
nodeWithLabels := test.BuildTestNode("nodeWithLabels", 2000, 3000, 10, nil)
|
||||
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||
addPodsToNode := func(nodeName string, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
|
||||
podWithNodeAffinity := buildTestPod("podWithNodeAffinity", nodeName, func(pod *v1.Pod) {
|
||||
pod.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{},
|
||||
}
|
||||
|
||||
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
|
||||
|
||||
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
|
||||
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
|
||||
unschedulableNodeWithLabels.Spec.Unschedulable = true
|
||||
|
||||
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time, affinityType string) []*v1.Pod {
|
||||
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
|
||||
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{},
|
||||
}
|
||||
|
||||
switch affinityType {
|
||||
case "requiredDuringSchedulingIgnoredDuringExecution":
|
||||
podWithNodeAffinity.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
switch affinityType {
|
||||
case "requiredDuringSchedulingIgnoredDuringExecution":
|
||||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case "preferredDuringSchedulingIgnoredDuringExecution":
|
||||
pod.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
Weight: 10,
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case "requiredDuringSchedulingRequiredDuringExecution":
|
||||
default:
|
||||
t.Fatalf("Invalid affinity type %s", affinityType)
|
||||
}
|
||||
case "preferredDuringSchedulingIgnoredDuringExecution":
|
||||
podWithNodeAffinity.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 10,
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: nodeLabelKey,
|
||||
Operator: "In",
|
||||
Values: []string{
|
||||
nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case "requiredDuringSchedulingRequiredDuringExecution":
|
||||
default:
|
||||
t.Fatalf("Invalid affinity type %s", affinityType)
|
||||
}
|
||||
|
||||
pod1 := test.BuildTestPod("pod1", 100, 0, node.Name, nil)
|
||||
pod2 := test.BuildTestPod("pod2", 100, 0, node.Name, nil)
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.DeletionTimestamp = deletionTimestamp
|
||||
})
|
||||
|
||||
podWithNodeAffinity.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
podWithNodeAffinity.DeletionTimestamp = deletionTimestamp
|
||||
pod1.DeletionTimestamp = deletionTimestamp
|
||||
pod2.DeletionTimestamp = deletionTimestamp
|
||||
pod1 := buildTestPod("pod1", nodeName, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.DeletionTimestamp = deletionTimestamp
|
||||
})
|
||||
pod2 := buildTestPod("pod2", nodeName, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.DeletionTimestamp = deletionTimestamp
|
||||
})
|
||||
|
||||
return []*v1.Pod{
|
||||
podWithNodeAffinity,
|
||||
@@ -126,8 +144,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingRequiredDuringExecution"},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingRequiredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingRequiredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod is correctly scheduled on node, no eviction expected [required affinity]",
|
||||
@@ -135,8 +156,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod is correctly scheduled on node, no eviction expected [preferred affinity]",
|
||||
@@ -144,8 +167,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: addPodsToNode(nodeWithLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
|
||||
@@ -153,8 +178,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available with better fit, should be evicted",
|
||||
@@ -162,8 +190,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should be evicted [required affinity]",
|
||||
@@ -171,8 +202,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -181,8 +215,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -191,8 +228,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint0,
|
||||
},
|
||||
{
|
||||
@@ -201,8 +241,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint0,
|
||||
},
|
||||
{
|
||||
@@ -211,8 +254,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -221,8 +267,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -231,8 +280,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -241,8 +293,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
maxNoOfPodsToEvictTotal: &uint0,
|
||||
},
|
||||
@@ -252,8 +307,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -262,8 +320,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint0,
|
||||
},
|
||||
{
|
||||
@@ -272,8 +333,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint0,
|
||||
},
|
||||
{
|
||||
@@ -282,8 +346,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -292,8 +359,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, &metav1.Time{}, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
},
|
||||
{
|
||||
@@ -302,8 +372,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildUnschedulableNodeWithLabels(),
|
||||
},
|
||||
nodefit: true,
|
||||
},
|
||||
{
|
||||
@@ -312,8 +385,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithoutLabelsName, nil),
|
||||
buildUnschedulableNodeWithLabels(),
|
||||
},
|
||||
nodefit: true,
|
||||
},
|
||||
{
|
||||
@@ -322,8 +398,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "requiredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
buildUnschedulableNodeWithLabels(),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
nodefit: true,
|
||||
},
|
||||
@@ -333,8 +412,11 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
args: RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"preferredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
pods: addPodsToNode(nodeWithoutLabels, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
|
||||
pods: addPodsToNode(nodeWithoutLabelsName, nil, "preferredDuringSchedulingIgnoredDuringExecution"),
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeWithLabelsName, setNodeDesiredNodeLabel),
|
||||
buildUnschedulableNodeWithLabels(),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
nodefit: true,
|
||||
},
|
||||
|
||||
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2026 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,7 +22,6 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
@@ -34,6 +33,37 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const (
|
||||
nodeName1 = "n1"
|
||||
nodeName2 = "n2"
|
||||
nodeName3 = "n3"
|
||||
nodeName4 = "n4"
|
||||
nodeName5 = "n5"
|
||||
nodeName6 = "n6"
|
||||
nodeName7 = "n7"
|
||||
|
||||
datacenterLabel = "datacenter"
|
||||
datacenterEast = "east"
|
||||
datacenterWest = "west"
|
||||
)
|
||||
|
||||
func buildTestNode(name string, apply func(*v1.Node)) *v1.Node {
|
||||
return test.BuildTestNode(name, 2000, 3000, 10, apply)
|
||||
}
|
||||
|
||||
func buildTestPod(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
|
||||
return test.BuildTestPod(name, 100, 0, nodeName, apply)
|
||||
}
|
||||
|
||||
func buildTestPodWithNormalOwnerRef(name, nodeName string, apply func(*v1.Pod)) *v1.Pod {
|
||||
return buildTestPod(name, nodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
if apply != nil {
|
||||
apply(pod)
|
||||
}
|
||||
})
|
||||
}
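Note: buildTestPodWithNormalOwnerRef and the toleration helpers further down pair node taints with pod tolerations. A standalone sketch of how a toleration matches a NoSchedule taint, using the ToleratesTaint helper from k8s.io/api/core/v1; the exact key/value suffixes produced by createNoScheduleTaint are assumed here, not taken from the repository.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Roughly what createNoScheduleTaint("testTaint", "test", 1) attaches to a node
	// (the index-suffixed key/value is an assumption about the helper's output).
	taint := v1.Taint{Key: "testTaint1", Value: "test1", Effect: v1.TaintEffectNoSchedule}

	// Roughly what addTolerationToPod gives p1/p3; it matches the taint exactly,
	// so the pod is left in place by the NodeTaints plugin.
	toleration := v1.Toleration{
		Key:      "testTaint1",
		Operator: v1.TolerationOpEqual,
		Value:    "test1",
		Effect:   v1.TaintEffectNoSchedule,
	}

	fmt.Println(toleration.ToleratesTaint(&taint)) // true: the pod stays put
}
```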
func createNoScheduleTaint(key, value string, index int) v1.Taint {
|
||||
return v1.Taint{
|
||||
Key: "testTaint" + fmt.Sprintf("%v", index),
|
||||
@@ -50,13 +80,39 @@ func createPreferNoScheduleTaint(key, value string, index int) v1.Taint {
|
||||
}
|
||||
}
|
||||
|
||||
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
|
||||
taints := []v1.Taint{}
|
||||
for _, index := range indices {
|
||||
taints = append(taints, createNoScheduleTaint(key, value, index))
|
||||
func withTestTaint1(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
createNoScheduleTaint("testTaint", "test", 1),
|
||||
}
|
||||
}
|
||||
|
||||
func withTestingTaint1(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
createNoScheduleTaint("testingTaint", "testing", 1),
|
||||
}
|
||||
}
|
||||
|
||||
func withBothTaints1(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
createNoScheduleTaint("testTaint", "test", 1),
|
||||
createNoScheduleTaint("testingTaint", "testing", 1),
|
||||
}
|
||||
}
|
||||
|
||||
func withDatacenterEastLabel(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
datacenterLabel: datacenterEast,
|
||||
}
|
||||
}
|
||||
|
||||
func withUnschedulable(node *v1.Node) {
|
||||
node.Spec.Unschedulable = true
|
||||
}
|
||||
|
||||
func withPreferNoScheduleTestTaint1(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
createPreferNoScheduleTaint("testTaint", "test", 1),
|
||||
}
|
||||
node.Spec.Taints = taints
|
||||
return node
|
||||
}
|
||||
|
||||
func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.TaintEffect) *v1.Pod {
|
||||
@@ -69,111 +125,24 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.Tai
|
||||
return pod
|
||||
}
|
||||
|
||||
func withTestTaintToleration1(pod *v1.Pod) {
|
||||
addTolerationToPod(pod, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
}
|
||||
|
||||
func withTestTaintXToleration1(pod *v1.Pod) {
|
||||
addTolerationToPod(pod, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
|
||||
}
|
||||
|
||||
func withLocalStorageVolume(pod *v1.Pod) {
|
||||
test.SetHostPathEmptyDirVolumeSource(pod)
|
||||
}
|
||||
|
||||
func withKubeSystemCriticalPod(pod *v1.Pod) {
|
||||
pod.Namespace = "kube-system"
|
||||
test.SetPodPriority(pod, utils.SystemCriticalPriority)
|
||||
}
|
||||
|
||||
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
|
||||
|
||||
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"datacenter": "east",
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec = v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
|
||||
node5 := test.BuildTestNode("n5", 2000, 3000, 10, nil)
|
||||
node5.Spec.Taints = []v1.Taint{
|
||||
createPreferNoScheduleTaint("testTaint", "test", 1),
|
||||
}
|
||||
|
||||
node6 := test.BuildTestNode("n6", 1, 1, 1, nil)
|
||||
node6.Spec.Taints = []v1.Taint{
|
||||
createPreferNoScheduleTaint("testTaint", "test", 1),
|
||||
}
|
||||
|
||||
node7 := test.BuildTestNode("n7", 2000, 3000, 10, nil)
|
||||
node7 = addTaintsToNode(node7, "testTaint", "test", []int{1})
|
||||
node7 = addTaintsToNode(node7, "testingTaint", "testing", []int{1})
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, nil)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, nil)
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
p7 := test.BuildTestPod("p7", 100, 0, node2.Name, nil)
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node2.Name, nil)
|
||||
p9 := test.BuildTestPod("p9", 100, 0, node2.Name, nil)
|
||||
p10 := test.BuildTestPod("p10", 100, 0, node2.Name, nil)
|
||||
p11 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
|
||||
p12 := test.BuildTestPod("p11", 100, 0, node2.Name, nil)
|
||||
|
||||
p1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p3.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p4.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p6.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p8.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p10.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p11.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p12.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
// The following 4 pods won't get evicted.
|
||||
// A Critical Pod.
|
||||
p7.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
p7.Spec.Priority = &priority
|
||||
p7.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
// A daemonset.
|
||||
p8.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
// A pod with local storage.
|
||||
p9.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p9.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
p10.Annotations = test.GetMirrorPodAnnotation()
|
||||
|
||||
p1 = addTolerationToPod(p1, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
p3 = addTolerationToPod(p3, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
|
||||
|
||||
p12.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
|
||||
p13 := test.BuildTestPod("p13", 100, 0, node5.Name, nil)
|
||||
p13.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
// node5 has PreferNoSchedule:testTaint1=test1, so p13 has to have
// PreferNoSchedule:testTaint0=test0 so that the pod does not tolerate the taint
|
||||
p13 = addTolerationToPod(p13, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
|
||||
|
||||
p14 := test.BuildTestPod("p14", 100, 0, node7.Name, nil)
|
||||
p14.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p14 = addTolerationToPod(p14, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
|
||||
p15 := test.BuildTestPod("p15", 100, 0, node7.Name, nil)
|
||||
p15.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
p15 = addTolerationToPod(p15, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
p15 = addTolerationToPod(p15, "testingTaint", "testing", 1, v1.TaintEffectNoSchedule)
|
||||
|
||||
var uint1, uint2 uint = 1, 2
|
||||
|
||||
tests := []struct {
|
||||
@@ -192,203 +161,306 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
includedTaints []string
|
||||
}{
|
||||
{
|
||||
description: "Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p1, p2, p3},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
expectedEvictedPodCount: 1, // p2 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Pods with tolerations but not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p1, p3, p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods with tolerations but not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p4", nodeName1, withTestTaintXToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
expectedEvictedPodCount: 1, // p4 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Only <maxNoOfPodsToEvictTotal> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p1, p5, p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Only <maxNoOfPodsToEvictTotal> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint2,
|
||||
maxNoOfPodsToEvictTotal: &uint1,
|
||||
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p1, p5, p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
|
||||
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p1, p5, p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p1, p5, p6},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p5", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p6", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
maxNoOfPodsToEvictPerNamespace: &uint1,
|
||||
expectedEvictedPodCount: 1, // p5 or p6 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Critical pods not tolerating node taint should not be evicted",
|
||||
pods: []*v1.Pod{p7, p8, p9, p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Critical pods not tolerating node taint should not be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
|
||||
buildTestPod("p8", nodeName2, test.SetDSOwnerRef),
|
||||
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
|
||||
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName2, withTestingTaint1),
|
||||
},
|
||||
expectedEvictedPodCount: 0, // nothing is evicted
|
||||
},
|
||||
{
|
||||
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
|
||||
pods: []*v1.Pod{p7, p8, p9, p10},
|
||||
nodes: []*v1.Node{node2},
|
||||
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
|
||||
buildTestPod("p8", nodeName2, test.SetDSOwnerRef),
|
||||
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
|
||||
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName2, withTestingTaint1),
|
||||
},
|
||||
evictLocalStoragePods: true,
|
||||
evictSystemCriticalPods: false,
|
||||
expectedEvictedPodCount: 1, // p9 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{p7, p8, p10, p11},
|
||||
nodes: []*v1.Node{node2},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
|
||||
buildTestPod("p8", nodeName2, test.SetDSOwnerRef),
|
||||
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
|
||||
buildTestPodWithNormalOwnerRef("p11", nodeName2, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName2, withTestingTaint1),
|
||||
},
|
||||
expectedEvictedPodCount: 1, // p11 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
|
||||
pods: []*v1.Pod{p2, p7, p9, p10},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
evictLocalStoragePods: false,
|
||||
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
|
||||
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
|
||||
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
buildTestNode(nodeName2, withTestingTaint1),
|
||||
},
|
||||
evictSystemCriticalPods: true,
|
||||
expectedEvictedPodCount: 2, // p2 and p7 are evicted
|
||||
},
|
||||
{
|
||||
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
|
||||
pods: []*v1.Pod{p1, p2, p3},
|
||||
nodes: []*v1.Node{node1, node2},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
buildTestNode(nodeName2, withTestingTaint1),
|
||||
},
|
||||
expectedEvictedPodCount: 0, // nothing is evicted: p2 doesn't tolerate the taint on the other node either
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
|
||||
pods: []*v1.Pod{p1, p3, p12},
|
||||
nodes: []*v1.Node{node1, node3},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p11", nodeName2, func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
datacenterLabel: datacenterWest,
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
buildTestNode(nodeName3, withDatacenterEastLabel),
|
||||
},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted since the other node doesn't match the pod's node selector
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
|
||||
pods: []*v1.Pod{p1, p2, p3},
|
||||
nodes: []*v1.Node{node1, node4},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
buildTestNode(nodeName4, withUnschedulable),
|
||||
},
|
||||
expectedEvictedPodCount: 0, // p2 can't be evicted since the other node is unschedulable
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
|
||||
pods: []*v1.Pod{p13},
|
||||
nodes: []*v1.Node{node5},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p13", nodeName5, func(pod *v1.Pod) {
|
||||
addTolerationToPod(pod, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName5, withPreferNoScheduleTestTaint1),
|
||||
},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
|
||||
pods: []*v1.Pod{p13},
|
||||
nodes: []*v1.Node{node5},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p13", nodeName5, func(pod *v1.Pod) {
|
||||
addTolerationToPod(pod, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName5, withPreferNoScheduleTestTaint1),
|
||||
},
|
||||
includePreferNoSchedule: true,
|
||||
expectedEvictedPodCount: 1, // p13 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
|
||||
pods: []*v1.Pod{p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
excludedTaints: []string{"excludedTaint1", "testTaint1"},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted, as one of the specified excludedTaints matches the key of node1's taint
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
|
||||
pods: []*v1.Pod{p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
excludedTaints: []string{"testTaint1=test1"},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted, as both the key and value of the excluded taint match node1's taint
|
||||
},
|
||||
{
|
||||
description: "The excluded taint matches the key of node1's taint, but does not match the value",
|
||||
pods: []*v1.Pod{p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "The excluded taint matches the key of node1's taint, but does not match the value",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
excludedTaints: []string{"testTaint1=test2"},
|
||||
expectedEvictedPodCount: 1, // pod gets evicted, as excluded taint value does not match node1's taint value
|
||||
},
|
||||
{
|
||||
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
|
||||
pods: []*v1.Pod{p2, p7, p9, p10},
|
||||
nodes: []*v1.Node{node1, node6},
|
||||
evictLocalStoragePods: false,
|
||||
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p7", nodeName2, withKubeSystemCriticalPod),
|
||||
buildTestPodWithNormalOwnerRef("p9", nodeName2, withLocalStorageVolume),
|
||||
buildTestPodWithNormalOwnerRef("p10", nodeName2, test.SetMirrorPodAnnotation),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
test.BuildTestNode(nodeName6, 1, 1, 1, withPreferNoScheduleTestTaint1),
|
||||
},
|
||||
evictSystemCriticalPods: true,
|
||||
expectedEvictedPodCount: 0, // p2 and p7 can't be evicted
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Pods tolerating included taints should not get evicted even with other taints present",
|
||||
pods: []*v1.Pod{p1},
|
||||
nodes: []*v1.Node{node7},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods tolerating included taints should not get evicted even with other taints present",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName7, withBothTaints1),
|
||||
},
|
||||
includedTaints: []string{"testTaint1=test1"},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted, as p1 tolerates the included taint, and taint "testingTaint1=testing1" is not included
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating not included taints should not get evicted",
|
||||
pods: []*v1.Pod{p1, p2, p4},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating not included taints should not get evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p4", nodeName1, withTestTaintXToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
includedTaints: []string{"testTaint2=test2"},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted, as node1's taint is not included, even though pods p2 and p4 do not tolerate it
|
||||
},
|
||||
{
|
||||
description: "Pods tolerating includedTaint should not get evicted. Pods not tolerating includedTaints should get evicted",
|
||||
pods: []*v1.Pod{p1, p2, p3},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods tolerating includedTaint should not get evicted. Pods not tolerating includedTaints should get evicted",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p1", nodeName1, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p2", nodeName1, nil),
|
||||
buildTestPodWithNormalOwnerRef("p3", nodeName1, withTestTaintToleration1),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName1, withTestTaint1),
|
||||
},
|
||||
includedTaints: []string{"testTaint1=test1"},
|
||||
expectedEvictedPodCount: 1, // node1 taint is included. p1 and p3 tolerate the included taint, p2 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating all taints are evicted when includedTaints is empty",
|
||||
pods: []*v1.Pod{p14, p15},
|
||||
nodes: []*v1.Node{node7},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
description: "Pods not tolerating all taints are evicted when includedTaints is empty",
|
||||
pods: []*v1.Pod{
|
||||
buildTestPodWithNormalOwnerRef("p14", nodeName7, withTestTaintToleration1),
|
||||
buildTestPodWithNormalOwnerRef("p15", nodeName7, func(pod *v1.Pod) {
|
||||
withTestTaintToleration1(pod)
|
||||
addTolerationToPod(pod, "testingTaint", "testing", 1, v1.TaintEffectNoSchedule)
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
buildTestNode(nodeName7, withBothTaints1),
|
||||
},
|
||||
expectedEvictedPodCount: 1, // includedTaints is empty so all taints are included. p15 tolerates both node taints and does not get evicted. p14 tolerates only one of them and gets evicted
|
||||
},
|
||||
}
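The builder helpers referenced throughout this table (buildTestNode, withTestTaint1, withTestTaintToleration1, ...) are defined elsewhere in the test file and are not part of this hunk. As a rough sketch only, and assuming the key/value pairs implied by the comments above (testTaint1=test1 on node1, testingTaint1=testing1 on node2), the taint and toleration helpers could look like this:

// Illustrative sketches; the real helpers in the test file may differ.
func withTestTaint1(node *v1.Node) {
	node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
		Key:    "testTaint1",
		Value:  "test1",
		Effect: v1.TaintEffectNoSchedule,
	})
}

func withTestTaintToleration1(pod *v1.Pod) {
	pod.Spec.Tolerations = append(pod.Spec.Tolerations, v1.Toleration{
		Key:      "testTaint1",
		Operator: v1.TolerationOpEqual,
		Value:    "test1",
		Effect:   v1.TaintEffectNoSchedule,
	})
}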
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -9,12 +9,10 @@ import (
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -1490,18 +1488,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
if !matched {
|
||||
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
evictedPods = append(evictedPods, eviction.GetName())
|
||||
}
|
||||
}
|
||||
return false, nil, nil // fallback to the default reactor
|
||||
})
|
||||
test.RegisterEvictedPodsCollector(fakeClient, &evictedPods)
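The inline eviction-tracking reactor above is replaced by a shared helper. Based on the code it replaces, a sketch of what test.RegisterEvictedPodsCollector could look like (the actual helper in the test package may differ):

// RegisterEvictedPodsCollector records the name of every pod for which an
// eviction is created against the fake client.
func RegisterEvictedPodsCollector(fakeClient *fake.Clientset, evictedPods *[]string) {
	fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() == "eviction" {
			if createAct, matched := action.(core.CreateActionImpl); matched {
				if eviction, matched := createAct.Object.(*policy.Eviction); matched {
					*evictedPods = append(*evictedPods, eviction.GetName())
				}
			}
		}
		return false, nil, nil // fall back to the default reactor
	})
}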
|
||||
SetDefaults_RemovePodsViolatingTopologySpreadConstraintArgs(&tc.args)
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Copyright 2026 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -78,6 +78,14 @@ type handleImpl struct {
|
||||
|
||||
var _ frameworktypes.Handle = &handleImpl{}
|
||||
|
||||
// pluginHandle wraps a shared handleImpl and adds a plugin-specific instance ID
|
||||
type pluginHandle struct {
|
||||
*handleImpl
|
||||
pluginInstanceID string
|
||||
}
|
||||
|
||||
var _ frameworktypes.Handle = &pluginHandle{}
|
||||
|
||||
// ClientSet retrieves kube client set
|
||||
func (hi *handleImpl) ClientSet() clientset.Interface {
|
||||
return hi.clientSet
|
||||
@@ -106,6 +114,17 @@ func (hi *handleImpl) Evictor() frameworktypes.Evictor {
|
||||
return hi.evictor
|
||||
}
|
||||
|
||||
// PluginInstanceID is not implemented on the base handle and panics if called.
// Plugins should receive a pluginHandle which has a specific instance ID.
|
||||
func (hi *handleImpl) PluginInstanceID() string {
|
||||
panic(fmt.Errorf("Not implemented"))
|
||||
}
|
||||
|
||||
// PluginInstanceID returns a unique identifier for this plugin instance.
|
||||
func (ph *pluginHandle) PluginInstanceID() string {
|
||||
return ph.pluginInstanceID
|
||||
}
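For illustration, a plugin could capture the ID once when it is built. The plugin below is hypothetical (its name, struct, and fields are not part of this change); it only relies on the builder signature and Handle interface used elsewhere in this diff:

// Hypothetical plugin that records the instance ID it was built with.
type myPlugin struct {
	handle     frameworktypes.Handle
	instanceID string
}

func (p *myPlugin) Name() string { return "MyPlugin" }

// newMyPlugin follows the builder signature registered with the plugin registry.
func newMyPlugin(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
	return &myPlugin{
		handle:     handle,
		instanceID: handle.PluginInstanceID(), // e.g. "<profileInstanceID>-<index>"
	}, nil
}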
|
||||
type filterPlugin interface {
|
||||
frameworktypes.Plugin
|
||||
Filter(pod *v1.Pod) bool
|
||||
@@ -142,6 +161,7 @@ type handleImplOpts struct {
|
||||
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
|
||||
podEvictor *evictions.PodEvictor
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
profileInstanceID string
|
||||
}
|
||||
|
||||
// WithClientSet sets clientSet for the scheduling frameworkImpl.
|
||||
@@ -182,6 +202,14 @@ func WithMetricsCollector(metricsCollector *metricscollector.MetricsCollector) O
|
||||
}
|
||||
}
|
||||
|
||||
// WithProfileInstanceID sets the profile instance ID for the handle.
|
||||
// This will be used to construct unique plugin instance IDs.
|
||||
func WithProfileInstanceID(profileInstanceID string) Option {
|
||||
return func(o *handleImplOpts) {
|
||||
o.profileInstanceID = profileInstanceID
|
||||
}
|
||||
}
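Callers building one profile per entry in the policy are expected to pass a unique string here, typically the profile's index. A sketch of the wiring, mirroring the test usage further down (the surrounding variables are assumed to already exist):

// For the i-th profile in the descheduler policy:
prfl, err := NewProfile(
	ctx,
	profileConfig,
	pluginregistry.PluginRegistry,
	WithClientSet(client),
	WithSharedInformerFactory(sharedInformerFactory),
	WithPodEvictor(podEvictor),
	WithGetPodsAssignedToNodeFnc(getPodsAssignedToNode),
	WithProfileInstanceID(fmt.Sprintf("%d", i)),
)
if err != nil {
	return fmt.Errorf("unable to create profile %q: %v", profileConfig.Name, err)
}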
|
||||
func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.PluginConfig, int) {
|
||||
for idx, pluginConfig := range pluginConfigs {
|
||||
if pluginConfig.Name == pluginName {
|
||||
@@ -191,7 +219,7 @@ func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
func buildPlugin(ctx context.Context, config api.DeschedulerProfile, pluginName string, handle *handleImpl, reg pluginregistry.Registry) (frameworktypes.Plugin, error) {
|
||||
func buildPlugin(ctx context.Context, config api.DeschedulerProfile, pluginName string, handle frameworktypes.Handle, reg pluginregistry.Registry) (frameworktypes.Plugin, error) {
|
||||
pc, _ := getPluginConfig(pluginName, config.PluginConfigs)
|
||||
if pc == nil {
|
||||
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", pluginName, "profile", config.Name)
|
||||
@@ -272,6 +300,7 @@ func NewProfile(ctx context.Context, config api.DeschedulerProfile, reg pluginre
|
||||
return nil, fmt.Errorf("profile %q configures preEvictionFilter extension point of non-existing plugins: %v", config.Name, sets.New(config.Plugins.PreEvictionFilter.Enabled...).Difference(pi.preEvictionFilter))
|
||||
}
|
||||
|
||||
// Create a base handle that will be used as a template for plugin-specific handles
|
||||
handle := &handleImpl{
|
||||
clientSet: hOpts.clientSet,
|
||||
getPodsAssignedToNodeFunc: hOpts.getPodsAssignedToNodeFunc,
|
||||
@@ -284,20 +313,26 @@ func NewProfile(ctx context.Context, config api.DeschedulerProfile, reg pluginre
|
||||
prometheusClient: hOpts.prometheusClient,
|
||||
}
|
||||
|
||||
// Collect all unique plugin names across all extension points
|
||||
pluginNames := append(config.Plugins.Deschedule.Enabled, config.Plugins.Balance.Enabled...)
|
||||
pluginNames = append(pluginNames, config.Plugins.Filter.Enabled...)
|
||||
pluginNames = append(pluginNames, config.Plugins.PreEvictionFilter.Enabled...)
|
||||
|
||||
// Build each unique plugin only once with a unique plugin instance ID
|
||||
plugins := make(map[string]frameworktypes.Plugin)
|
||||
for _, plugin := range sets.New(pluginNames...).UnsortedList() {
|
||||
pg, err := buildPlugin(ctx, config, plugin, handle, reg)
|
||||
for idx, pluginName := range sets.New(pluginNames...).UnsortedList() {
|
||||
ph := &pluginHandle{
|
||||
handleImpl: handle,
|
||||
pluginInstanceID: fmt.Sprintf("%s-%d", hOpts.profileInstanceID, idx),
|
||||
}
|
||||
pg, err := buildPlugin(ctx, config, pluginName, ph, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to build %v plugin: %v", plugin, err)
|
||||
return nil, fmt.Errorf("unable to build %v plugin: %v", pluginName, err)
|
||||
}
|
||||
if pg == nil {
|
||||
return nil, fmt.Errorf("got empty %v plugin build", plugin)
|
||||
return nil, fmt.Errorf("got empty %v plugin build", pluginName)
|
||||
}
|
||||
plugins[plugin] = pg
|
||||
plugins[pluginName] = pg
|
||||
}
|
||||
|
||||
// Later, when a default list of plugins and their extension points is established,
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
@@ -26,7 +27,60 @@ import (
|
||||
testutils "sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
// registerDefaultEvictor registers the DefaultEvictor plugin with the given registry
|
||||
func registerDefaultEvictor(registry pluginregistry.Registry) {
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
|
||||
defaultevictor.SetDefaults_DefaultEvictorArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
|
||||
// Helper to build profile config with default Filter and PreEvictionFilter
|
||||
buildProfileConfig := func(name string, descheduleEnabled, balanceEnabled bool) api.DeschedulerProfile {
|
||||
config := api.DeschedulerProfile{
|
||||
Name: name,
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FakePlugin",
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
}
|
||||
if descheduleEnabled {
|
||||
config.Plugins.Deschedule = api.PluginSet{
|
||||
Enabled: []string{"FakePlugin"},
|
||||
}
|
||||
}
|
||||
if balanceEnabled {
|
||||
config.Plugins.Balance = api.PluginSet{
|
||||
Enabled: []string{"FakePlugin"},
|
||||
}
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
config api.DeschedulerProfile
|
||||
@@ -34,134 +88,26 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
|
||||
expectedEviction bool
|
||||
}{
|
||||
{
|
||||
name: "profile with deschedule extension point enabled single eviction",
|
||||
config: api.DeschedulerProfile{
|
||||
Name: "strategy-test-profile-with-deschedule",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FakePlugin",
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{"FakePlugin"},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
name: "profile with deschedule extension point enabled single eviction",
|
||||
config: buildProfileConfig("strategy-test-profile-with-deschedule", true, false),
|
||||
extensionPoint: frameworktypes.DescheduleExtensionPoint,
|
||||
expectedEviction: true,
|
||||
},
|
||||
{
|
||||
name: "profile with balance extension point enabled single eviction",
|
||||
config: api.DeschedulerProfile{
|
||||
Name: "strategy-test-profile-with-balance",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FakePlugin",
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{"FakePlugin"},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
name: "profile with balance extension point enabled single eviction",
|
||||
config: buildProfileConfig("strategy-test-profile-with-balance", false, true),
|
||||
extensionPoint: frameworktypes.BalanceExtensionPoint,
|
||||
expectedEviction: true,
|
||||
},
|
||||
{
|
||||
name: "profile with deschedule extension point balance enabled no eviction",
|
||||
config: api.DeschedulerProfile{
|
||||
Name: "strategy-test-profile-with-deschedule",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FakePlugin",
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{"FakePlugin"},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
name: "profile with deschedule extension point balance enabled no eviction",
|
||||
config: buildProfileConfig("strategy-test-profile-with-balance", false, true),
|
||||
extensionPoint: frameworktypes.DescheduleExtensionPoint,
|
||||
expectedEviction: false,
|
||||
},
|
||||
{
|
||||
name: "profile with balance extension point deschedule enabled no eviction",
|
||||
config: api.DeschedulerProfile{
|
||||
Name: "strategy-test-profile-with-deschedule",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FakePlugin",
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{"FakePlugin"},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
name: "profile with balance extension point deschedule enabled no eviction",
|
||||
config: buildProfileConfig("strategy-test-profile-with-deschedule", true, false),
|
||||
extensionPoint: frameworktypes.BalanceExtensionPoint,
|
||||
expectedEviction: false,
|
||||
},
|
||||
@@ -206,25 +152,9 @@ func TestProfileDescheduleBalanceExtensionPointsEviction(t *testing.T) {
|
||||
}
|
||||
|
||||
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
|
||||
pluginregistry.Register(
|
||||
"FakePlugin",
|
||||
fakeplugin.NewPluginFncFromFake(&fakePlugin),
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
fakeplugin.RegisterFakePlugin("FakePlugin", &fakePlugin, pluginregistry.PluginRegistry)
|
||||
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
|
||||
defaultevictor.SetDefaults_DefaultEvictorArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
registerDefaultEvictor(pluginregistry.PluginRegistry)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
var evictedPods []string
|
||||
@@ -319,56 +249,13 @@ func TestProfileExtensionPoints(t *testing.T) {
|
||||
fakeBalancePlugin := &fakeplugin.FakeBalancePlugin{PluginName: balancePluginName}
|
||||
fakeFilterPlugin := &fakeplugin.FakeFilterPlugin{PluginName: filterPluginName}
|
||||
|
||||
pluginregistry.Register(
|
||||
fakePluginName,
|
||||
fakeplugin.NewPluginFncFromFake(fakePlugin),
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
pluginregistry.Register(
|
||||
deschedulePluginName,
|
||||
fakeplugin.NewFakeDeschedulePluginFncFromFake(fakeDeschedulePlugin),
|
||||
&fakeplugin.FakeDeschedulePlugin{},
|
||||
&fakeplugin.FakeDeschedulePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
pluginregistry.Register(
|
||||
balancePluginName,
|
||||
fakeplugin.NewFakeBalancePluginFncFromFake(fakeBalancePlugin),
|
||||
&fakeplugin.FakeBalancePlugin{},
|
||||
&fakeplugin.FakeBalancePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
|
||||
pluginregistry.Register(
|
||||
filterPluginName,
|
||||
fakeplugin.NewFakeFilterPluginFncFromFake(fakeFilterPlugin),
|
||||
&fakeplugin.FakeFilterPlugin{},
|
||||
&fakeplugin.FakeFilterPluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
fakeplugin.RegisterFakePlugin(fakePluginName, fakePlugin, pluginregistry.PluginRegistry)
|
||||
fakeplugin.RegisterFakeDeschedulePlugin(deschedulePluginName, fakeDeschedulePlugin, pluginregistry.PluginRegistry)
|
||||
fakeplugin.RegisterFakeBalancePlugin(balancePluginName, fakeBalancePlugin, pluginregistry.PluginRegistry)
|
||||
fakeplugin.RegisterFakeFilterPlugin(filterPluginName, fakeFilterPlugin, pluginregistry.PluginRegistry)
|
||||
}
|
||||
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
|
||||
defaultevictor.SetDefaults_DefaultEvictorArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
registerDefaultEvictor(pluginregistry.PluginRegistry)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
var evictedPods []string
|
||||
@@ -524,15 +411,7 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
|
||||
})
|
||||
|
||||
// plugin implementing Filter extension point
|
||||
pluginregistry.Register(
|
||||
pluginName,
|
||||
fakeplugin.NewFakeFilterPluginFncFromFake(fakeFilterPlugin),
|
||||
&fakeplugin.FakeFilterPlugin{},
|
||||
&fakeplugin.FakeFilterPluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
fakeplugin.RegisterFakeFilterPlugin(pluginName, fakeFilterPlugin, pluginregistry.PluginRegistry)
|
||||
|
||||
fakePluginName := fmt.Sprintf("FakePlugin_%v", i)
|
||||
fakePlugin := fakeplugin.FakePlugin{}
|
||||
@@ -557,26 +436,10 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
|
||||
return true, false, nil
|
||||
})
|
||||
|
||||
pluginregistry.Register(
|
||||
fakePluginName,
|
||||
fakeplugin.NewPluginFncFromFake(&fakePlugin),
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
fakeplugin.RegisterFakePlugin(fakePluginName, &fakePlugin, pluginregistry.PluginRegistry)
|
||||
}
|
||||
|
||||
pluginregistry.Register(
|
||||
defaultevictor.PluginName,
|
||||
defaultevictor.New,
|
||||
&defaultevictor.DefaultEvictor{},
|
||||
&defaultevictor.DefaultEvictorArgs{},
|
||||
defaultevictor.ValidateDefaultEvictorArgs,
|
||||
defaultevictor.SetDefaults_DefaultEvictorArgs,
|
||||
pluginregistry.PluginRegistry,
|
||||
)
|
||||
registerDefaultEvictor(pluginregistry.PluginRegistry)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
var evictedPods []string
|
||||
@@ -680,3 +543,325 @@ func TestProfileExtensionPointOrdering(t *testing.T) {
|
||||
t.Errorf("check for balance invocation order failed. Results are not deep equal. mismatch (-want +got):\n%s", diff)
|
||||
}
|
||||
}
|
||||
|
||||
// verifyInstanceIDsMatch verifies that instance IDs captured at creation, deschedule, and balance match
|
||||
func verifyInstanceIDsMatch(t *testing.T, profileInstanceID string, pluginNames []string, creationIDs, descheduleIDs, balanceIDs map[string]string) {
|
||||
for _, pluginName := range pluginNames {
|
||||
creationID := creationIDs[pluginName]
|
||||
descheduleID := descheduleIDs[pluginName]
|
||||
balanceID := balanceIDs[pluginName]
|
||||
|
||||
if creationID == "" {
|
||||
t.Errorf("Profile %s, plugin %s: plugin creation did not capture instance ID", profileInstanceID, pluginName)
|
||||
}
|
||||
if descheduleID == "" {
|
||||
t.Errorf("Profile %s, plugin %s: deschedule extension point did not capture instance ID", profileInstanceID, pluginName)
|
||||
}
|
||||
if balanceID == "" {
|
||||
t.Errorf("Profile %s, plugin %s: balance extension point did not capture instance ID", profileInstanceID, pluginName)
|
||||
}
|
||||
|
||||
// Verify all IDs match
|
||||
if creationID != descheduleID {
|
||||
t.Errorf("Profile %s, plugin %s: instance ID mismatch - creation: %s, deschedule: %s", profileInstanceID, pluginName, creationID, descheduleID)
|
||||
}
|
||||
if creationID != balanceID {
|
||||
t.Errorf("Profile %s, plugin %s: instance ID mismatch - creation: %s, balance: %s", profileInstanceID, pluginName, creationID, balanceID)
|
||||
}
|
||||
if descheduleID != balanceID {
|
||||
t.Errorf("Profile %s, plugin %s: instance ID mismatch - deschedule: %s, balance: %s", profileInstanceID, pluginName, descheduleID, balanceID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// verifyInstanceIDFormat verifies that instance IDs have correct format and sequential indices
|
||||
func verifyInstanceIDFormat(t *testing.T, profileInstanceID string, pluginNames []string, pluginIDs map[string]string) sets.Set[string] {
|
||||
if len(pluginIDs) != len(pluginNames) {
|
||||
t.Errorf("Profile %s: expected %d plugins to be invoked, got %d", profileInstanceID, len(pluginNames), len(pluginIDs))
|
||||
}
|
||||
|
||||
// Collect all instance IDs for this profile
|
||||
profileInstanceIDs := sets.New[string]()
|
||||
for pluginName, instanceID := range pluginIDs {
|
||||
if instanceID == "" {
|
||||
t.Errorf("Profile %s, plugin %s: expected instance ID to be set, got empty string", profileInstanceID, pluginName)
|
||||
}
|
||||
profileInstanceIDs.Insert(instanceID)
|
||||
}
|
||||
|
||||
// Verify all IDs within this profile are unique
|
||||
if profileInstanceIDs.Len() != len(pluginIDs) {
|
||||
t.Errorf("Profile %s: duplicate instance IDs found", profileInstanceID)
|
||||
}
|
||||
|
||||
// Verify all IDs match the expected format: "{profileInstanceID}-{index}"
|
||||
// and contain sequential indices from 0 to n-1
|
||||
expectedIndices := sets.New[int]()
|
||||
for i := 0; i < len(pluginNames); i++ {
|
||||
expectedIndices.Insert(i)
|
||||
}
|
||||
actualIndices := sets.New[int]()
|
||||
for pluginName, instanceID := range pluginIDs {
|
||||
var idx int
|
||||
expectedPrefix := profileInstanceID + "-"
|
||||
if !strings.HasPrefix(instanceID, expectedPrefix) {
|
||||
t.Errorf("Profile %s, plugin %s: instance ID %s does not start with %s", profileInstanceID, pluginName, instanceID, expectedPrefix)
|
||||
continue
|
||||
}
|
||||
_, err := fmt.Sscanf(instanceID, profileInstanceID+"-%d", &idx)
|
||||
if err != nil {
|
||||
t.Errorf("Profile %s, plugin %s: instance ID %s does not match expected format", profileInstanceID, pluginName, instanceID)
|
||||
continue
|
||||
}
|
||||
actualIndices.Insert(idx)
|
||||
}
|
||||
// Verify we have indices 0 through n-1
|
||||
diff := cmp.Diff(expectedIndices, actualIndices)
|
||||
if diff != "" {
|
||||
t.Errorf("Profile %s: instance ID indices mismatch (-want +got):\n%s", profileInstanceID, diff)
|
||||
}
|
||||
|
||||
return profileInstanceIDs
|
||||
}
|
||||
|
||||
func TestPluginInstanceIDs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
profiles []struct {
|
||||
profileInstanceID string
|
||||
pluginNames []string
|
||||
}
|
||||
}{
|
||||
{
|
||||
name: "single plugin gets instance ID",
|
||||
profiles: []struct {
|
||||
profileInstanceID string
|
||||
pluginNames []string
|
||||
}{
|
||||
{
|
||||
profileInstanceID: "0",
|
||||
pluginNames: []string{"TestPlugin"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "two plugins get different instance IDs",
|
||||
profiles: []struct {
|
||||
profileInstanceID string
|
||||
pluginNames []string
|
||||
}{
|
||||
{
|
||||
profileInstanceID: "0",
|
||||
pluginNames: []string{"Plugin_0", "Plugin_1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "three profiles with two plugins each get unique instance IDs",
|
||||
profiles: []struct {
|
||||
profileInstanceID string
|
||||
pluginNames []string
|
||||
}{
|
||||
{
|
||||
profileInstanceID: "0",
|
||||
pluginNames: []string{"Plugin_A", "Plugin_B"},
|
||||
},
|
||||
{
|
||||
profileInstanceID: "1",
|
||||
pluginNames: []string{"Plugin_C", "Plugin_D"},
|
||||
},
|
||||
{
|
||||
profileInstanceID: "2",
|
||||
pluginNames: []string{"Plugin_E", "Plugin_F"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "three profiles with same plugin names get different instance IDs per profile",
|
||||
profiles: []struct {
|
||||
profileInstanceID string
|
||||
pluginNames []string
|
||||
}{
|
||||
{
|
||||
profileInstanceID: "0",
|
||||
pluginNames: []string{"CommonPlugin_X", "CommonPlugin_Y"},
|
||||
},
|
||||
{
|
||||
profileInstanceID: "1",
|
||||
pluginNames: []string{"CommonPlugin_X", "CommonPlugin_Y"},
|
||||
},
|
||||
{
|
||||
profileInstanceID: "2",
|
||||
pluginNames: []string{"CommonPlugin_X", "CommonPlugin_Y"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
n1 := testutils.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := testutils.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{n1, n2}
|
||||
|
||||
// Track instance IDs by profile from different stages
|
||||
profileDescheduleIDs := make(map[string]map[string]string) // profileInstanceID -> pluginName -> instanceID (from Deschedule execution)
|
||||
profileBalanceIDs := make(map[string]map[string]string) // profileInstanceID -> pluginName -> instanceID (from Balance execution)
|
||||
profileCreationIDs := make(map[string]map[string]string) // profileInstanceID -> pluginName -> instanceID (from plugin creation)
|
||||
registry := pluginregistry.NewRegistry()
|
||||
|
||||
// Collect all distinct plugin names across all profiles
|
||||
allPluginNames := sets.New[string]()
|
||||
for _, profileCfg := range test.profiles {
|
||||
allPluginNames.Insert(profileCfg.pluginNames...)
|
||||
}
|
||||
|
||||
// Helper function to validate and store instance ID
|
||||
captureInstanceID := func(instanceID, pluginName string, targetMap map[string]map[string]string) {
|
||||
parts := strings.Split(instanceID, "-")
|
||||
if len(parts) < 2 {
|
||||
t.Fatalf("Plugin %s: instance ID %s does not have expected format 'profileID-index'", pluginName, instanceID)
|
||||
}
|
||||
profileID := parts[0]
|
||||
if targetMap[profileID] == nil {
|
||||
targetMap[profileID] = make(map[string]string)
|
||||
}
|
||||
targetMap[profileID][pluginName] = instanceID
|
||||
}
|
||||
|
||||
// Register all plugins before creating profiles
|
||||
for _, pluginName := range allPluginNames.UnsortedList() {
|
||||
// Capture plugin name for closure
|
||||
name := pluginName
|
||||
|
||||
pluginregistry.Register(
|
||||
pluginName,
|
||||
func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePlugin := &fakeplugin.FakePlugin{PluginName: name}
|
||||
|
||||
fakePlugin.AddReactor(string(frameworktypes.DescheduleExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
if dAction, ok := action.(fakeplugin.DescheduleAction); ok {
|
||||
captureInstanceID(dAction.Handle().PluginInstanceID(), name, profileDescheduleIDs)
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
})
|
||||
|
||||
fakePlugin.AddReactor(string(frameworktypes.BalanceExtensionPoint), func(action fakeplugin.Action) (handled, filter bool, err error) {
|
||||
if bAction, ok := action.(fakeplugin.BalanceAction); ok {
|
||||
captureInstanceID(bAction.Handle().PluginInstanceID(), name, profileBalanceIDs)
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
})
|
||||
|
||||
// Use NewPluginFncFromFakeWithReactor to wrap and capture instance ID at creation
|
||||
builder := fakeplugin.NewPluginFncFromFakeWithReactor(fakePlugin, func(action fakeplugin.ActionImpl) {
|
||||
captureInstanceID(action.Handle().PluginInstanceID(), name, profileCreationIDs)
|
||||
})
|
||||
|
||||
return builder(ctx, args, handle)
|
||||
},
|
||||
&fakeplugin.FakePlugin{},
|
||||
&fakeplugin.FakePluginArgs{},
|
||||
fakeplugin.ValidateFakePluginArgs,
|
||||
fakeplugin.SetDefaults_FakePluginArgs,
|
||||
registry,
|
||||
)
|
||||
}
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2)
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
|
||||
ctx,
|
||||
client,
|
||||
nil,
|
||||
defaultevictor.DefaultEvictorArgs{},
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
// Create all profiles
|
||||
var profiles []*profileImpl
|
||||
for _, profileCfg := range test.profiles {
|
||||
|
||||
var pluginConfigs []api.PluginConfig
|
||||
for _, pluginName := range profileCfg.pluginNames {
|
||||
pluginConfigs = append(pluginConfigs, api.PluginConfig{
|
||||
Name: pluginName,
|
||||
Args: &fakeplugin.FakePluginArgs{},
|
||||
})
|
||||
}
|
||||
|
||||
prfl, err := NewProfile(
|
||||
ctx,
|
||||
api.DeschedulerProfile{
|
||||
Name: "test-profile",
|
||||
PluginConfigs: pluginConfigs,
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: profileCfg.pluginNames,
|
||||
},
|
||||
Balance: api.PluginSet{
|
||||
Enabled: profileCfg.pluginNames,
|
||||
},
|
||||
},
|
||||
},
|
||||
registry,
|
||||
WithClientSet(client),
|
||||
WithSharedInformerFactory(handle.SharedInformerFactoryImpl),
|
||||
WithPodEvictor(podEvictor),
|
||||
WithGetPodsAssignedToNodeFnc(handle.GetPodsAssignedToNodeFuncImpl),
|
||||
WithProfileInstanceID(profileCfg.profileInstanceID),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create profile: %v", err)
|
||||
}
|
||||
profiles = append(profiles, prfl)
|
||||
}
|
||||
|
||||
// Run deschedule and balance plugins for all profiles
|
||||
for _, prfl := range profiles {
|
||||
prfl.RunDeschedulePlugins(ctx, nodes)
|
||||
prfl.RunBalancePlugins(ctx, nodes)
|
||||
}
|
||||
|
||||
// Verify creation, deschedule, and balance IDs all match
|
||||
for _, profileCfg := range test.profiles {
|
||||
verifyInstanceIDsMatch(
|
||||
t,
|
||||
profileCfg.profileInstanceID,
|
||||
profileCfg.pluginNames,
|
||||
profileCreationIDs[profileCfg.profileInstanceID],
|
||||
profileDescheduleIDs[profileCfg.profileInstanceID],
|
||||
profileBalanceIDs[profileCfg.profileInstanceID],
|
||||
)
|
||||
}
|
||||
|
||||
// Verify all plugins were invoked and have correct instance IDs
|
||||
allInstanceIDs := sets.New[string]()
|
||||
for _, profileCfg := range test.profiles {
|
||||
profileInstanceIDs := verifyInstanceIDFormat(
|
||||
t,
|
||||
profileCfg.profileInstanceID,
|
||||
profileCfg.pluginNames,
|
||||
profileDescheduleIDs[profileCfg.profileInstanceID],
|
||||
)
|
||||
allInstanceIDs = allInstanceIDs.Union(profileInstanceIDs)
|
||||
}
|
||||
|
||||
// Verify all instance IDs are unique across all profiles
|
||||
totalExpectedPlugins := 0
|
||||
for _, profileCfg := range test.profiles {
|
||||
totalExpectedPlugins += len(profileCfg.pluginNames)
|
||||
}
|
||||
if allInstanceIDs.Len() != totalExpectedPlugins {
|
||||
t.Errorf("Expected %d unique instance IDs across all profiles, got %d", totalExpectedPlugins, allInstanceIDs.Len())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,6 +41,9 @@ type Handle interface {
	GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc
	SharedInformerFactory() informers.SharedInformerFactory
	MetricsCollector() *metricscollector.MetricsCollector
	// PluginInstanceID returns a unique identifier for this plugin instance.
	// The ID is unique across all plugin instances in a configuration.
	PluginInstanceID() string
}
|
||||
|
||||
// Evictor defines an interface for filtering and evicting pods
|
||||
|
||||
@@ -110,6 +110,12 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
|
||||
opts = append(opts, otlptracegrpc.WithInsecure())
|
||||
}
|
||||
|
||||
if os.Getenv("USER") == "" {
|
||||
if err := os.Setenv("USER", "descheduler"); err != nil {
|
||||
klog.ErrorS(err, "failed to set USER environment variable")
|
||||
}
|
||||
}
|
||||
|
||||
client := otlptracegrpc.NewClient(opts...)
|
||||
|
||||
exporter, err := otlptrace.New(ctx, client)
|
||||
|
||||
@@ -11,24 +11,10 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
resourcehelper "k8s.io/component-helpers/resource"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// GetResourceRequest finds and returns the request value for a specific resource.
|
||||
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
|
||||
if resource == v1.ResourcePods {
|
||||
return 1
|
||||
}
|
||||
|
||||
requestQuantity := GetResourceRequestQuantity(pod, resource)
|
||||
|
||||
if resource == v1.ResourceCPU {
|
||||
return requestQuantity.MilliValue()
|
||||
}
|
||||
|
||||
return requestQuantity.Value()
|
||||
}
|
||||
|
||||
// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
|
||||
func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
|
||||
requestQuantity := resource.Quantity{}
|
||||
@@ -42,26 +28,8 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
|
||||
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
|
||||
requestQuantity.Add(rQuantity)
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
|
||||
if requestQuantity.Cmp(rQuantity) < 0 {
|
||||
requestQuantity = rQuantity.DeepCopy()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We assume pod overhead feature gate is enabled.
|
||||
// We can't import the scheduler settings so we will inherit the default.
|
||||
if pod.Spec.Overhead != nil {
|
||||
if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
|
||||
requestQuantity.Add(podOverhead)
|
||||
}
|
||||
if rQuantity, ok := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{})[resourceName]; ok {
|
||||
requestQuantity.Add(rQuantity)
|
||||
}
|
||||
|
||||
return requestQuantity
|
||||
@@ -171,59 +139,9 @@ func GetPodSource(pod *v1.Pod) (string, error) {
|
||||
// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
|
||||
// total container resource requests and to the total container limits which have a
|
||||
// non-zero quantity.
|
||||
func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
|
||||
reqs, limits = v1.ResourceList{}, v1.ResourceList{}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
addResourceList(reqs, container.Resources.Requests)
|
||||
addResourceList(limits, container.Resources.Limits)
|
||||
}
|
||||
// init containers define the minimum of any resource
|
||||
for _, container := range pod.Spec.InitContainers {
|
||||
maxResourceList(reqs, container.Resources.Requests)
|
||||
maxResourceList(limits, container.Resources.Limits)
|
||||
}
|
||||
|
||||
// We assume pod overhead feature gate is enabled.
|
||||
// We can't import the scheduler settings so we will inherit the default.
|
||||
if pod.Spec.Overhead != nil {
|
||||
addResourceList(reqs, pod.Spec.Overhead)
|
||||
|
||||
for name, quantity := range pod.Spec.Overhead {
|
||||
if value, ok := limits[name]; ok && !value.IsZero() {
|
||||
value.Add(quantity)
|
||||
limits[name] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// addResourceList adds the resources in newList to list
|
||||
func addResourceList(list, newList v1.ResourceList) {
|
||||
for name, quantity := range newList {
|
||||
if value, ok := list[name]; !ok {
|
||||
list[name] = quantity.DeepCopy()
|
||||
} else {
|
||||
value.Add(quantity)
|
||||
list[name] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// maxResourceList sets list to the greater of list/newList for every resource
|
||||
// either list
|
||||
func maxResourceList(list, new v1.ResourceList) {
|
||||
for name, quantity := range new {
|
||||
if value, ok := list[name]; !ok {
|
||||
list[name] = quantity.DeepCopy()
|
||||
continue
|
||||
} else {
|
||||
if quantity.Cmp(value) > 0 {
|
||||
list[name] = quantity.DeepCopy()
|
||||
}
|
||||
}
|
||||
}
|
||||
func PodRequestsAndLimits(pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
	opts := resourcehelper.PodResourcesOptions{}
	return resourcehelper.PodRequests(pod, opts), resourcehelper.PodLimits(pod, opts)
}
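PodRequestsAndLimits now defers entirely to component-helpers, which by default already accounts for init containers and pod overhead. A small illustrative caller (the pod variable and the logging line are assumptions, not part of the change):

reqs, limits := PodRequestsAndLimits(pod)
cpuReq := reqs[v1.ResourceCPU]
cpuLim := limits[v1.ResourceCPU]
klog.V(4).InfoS("pod resources", "pod", klog.KObj(pod), "cpuRequest", cpuReq.String(), "cpuLimit", cpuLim.String())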
|
||||
// PodToleratesTaints returns true if a pod tolerates one node's taints
|
||||
|
||||
test/e2e/e2e_podswithpvc_test.go (new file, 373 lines)
@@ -0,0 +1,373 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/component-base/config"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
apiv1alpha2 "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
|
||||
)
|
||||
|
||||
// protectPodsWithPVCPolicy returns a descheduler policy that protects pods
|
||||
// using PVCs of specific storage classes from eviction while, at the same
|
||||
// time, evicting pods that have restarted more than 3 times.
|
||||
func protectPodsWithPVCPolicy(namespace string, protectedsc []defaultevictor.ProtectedStorageClass) *apiv1alpha2.DeschedulerPolicy {
|
||||
return &apiv1alpha2.DeschedulerPolicy{
|
||||
Profiles: []apiv1alpha2.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProtectPodsWithPVCPolicy",
|
||||
PluginConfigs: []apiv1alpha2.PluginConfig{
|
||||
{
|
||||
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||
Args: runtime.RawExtension{
|
||||
Object: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 3,
|
||||
IncludingInitContainers: true,
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{namespace},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: runtime.RawExtension{
|
||||
Object: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
},
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
},
|
||||
Config: &defaultevictor.PodProtectionsConfig{
|
||||
PodsWithPVC: &defaultevictor.PodsWithPVCConfig{
|
||||
ProtectedStorageClasses: protectedsc,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: apiv1alpha2.Plugins{
|
||||
Filter: apiv1alpha2.PluginSet{
|
||||
Enabled: []string{
|
||||
defaultevictor.PluginName,
|
||||
},
|
||||
},
|
||||
Deschedule: apiv1alpha2.PluginSet{
|
||||
Enabled: []string{
|
||||
removepodshavingtoomanyrestarts.PluginName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestProtectPodsWithPVC tests that pods using PVCs are protected.
|
||||
func TestProtectPodsWithPVC(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
initPluginRegistry()
|
||||
|
||||
cli, err := client.CreateClient(
|
||||
config.ClientConnectionConfiguration{
|
||||
Kubeconfig: os.Getenv("KUBECONFIG"),
|
||||
}, "",
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("error during kubernetes client creation with %v", err)
|
||||
}
|
||||
|
||||
// start by finding out what is the default storage class in the
|
||||
// cluster. if none is found then this test can't run.
|
||||
scs, err := cli.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("error listing storage classes: %v", err)
|
||||
}
|
||||
|
||||
var defclass *storagev1.StorageClass
|
||||
for _, sc := range scs.Items {
|
||||
if _, ok := sc.Annotations["storageclass.kubernetes.io/is-default-class"]; ok {
|
||||
defclass = &sc
|
||||
break
|
||||
}
|
||||
}
|
||||
if defclass == nil {
|
||||
t.Fatalf("no default storage class found, unable to run the test")
|
||||
}
|
||||
|
||||
// now we replicate the default storage class so we have two different
|
||||
// storage classes in the cluster. this is useful to test protected vs
|
||||
// unprotected pods using PVCs.
|
||||
unprotectedsc := defclass.DeepCopy()
|
||||
delete(unprotectedsc.Annotations, "storageclass.kubernetes.io/is-default-class")
|
||||
unprotectedsc.ResourceVersion = ""
|
||||
unprotectedsc.Name = "unprotected"
|
||||
if _, err = cli.StorageV1().StorageClasses().Create(ctx, unprotectedsc, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("error creating unprotected storage class: %v", err)
|
||||
}
|
||||
defer cli.StorageV1().StorageClasses().Delete(ctx, unprotectedsc.Name, metav1.DeleteOptions{})
|
||||
|
||||
// this is the namespace we are going to use for all testing
|
||||
t.Logf("creating testing namespace %v", t.Name())
|
||||
namespace := &v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("e2e-%s", strings.ToLower(t.Name())),
|
||||
},
|
||||
}
|
||||
if _, err := cli.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Unable to create ns %v", namespace.Name)
|
||||
}
|
||||
defer cli.CoreV1().Namespaces().Delete(ctx, namespace.Name, metav1.DeleteOptions{})
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
policy *apiv1alpha2.DeschedulerPolicy
|
||||
enableGracePeriod bool
|
||||
expectedEvictedPodCount uint
|
||||
pvcs []*v1.PersistentVolumeClaim
|
||||
volumes []v1.Volume
|
||||
}{
|
||||
{
|
||||
name: "evict pods from unprotected storage class",
|
||||
policy: protectPodsWithPVCPolicy(
|
||||
namespace.Name, []defaultevictor.ProtectedStorageClass{
|
||||
{
|
||||
Name: defclass.Name,
|
||||
},
|
||||
},
|
||||
),
|
||||
expectedEvictedPodCount: 4,
|
||||
pvcs: []*v1.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-unprotected-claim",
|
||||
Namespace: namespace.Name,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: ptr.To(unprotectedsc.Name),
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumes: []v1.Volume{
|
||||
{
|
||||
Name: "test-unprotected-volume",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "test-unprotected-claim",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "preserve pods from protected storage class",
|
||||
policy: protectPodsWithPVCPolicy(
|
||||
namespace.Name, []defaultevictor.ProtectedStorageClass{
|
||||
{
|
||||
Name: defclass.Name,
|
||||
},
|
||||
},
|
||||
),
|
||||
expectedEvictedPodCount: 0,
|
||||
pvcs: []*v1.PersistentVolumeClaim{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-protected-claim",
|
||||
Namespace: namespace.Name,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
Resources: v1.VolumeResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse("1Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
volumes: []v1.Volume{
|
||||
{
|
||||
Name: "test-protected-volume",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "test-protected-claim",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Logf("creating testing pvcs in namespace %v", namespace.Name)
|
||||
for _, pvc := range tc.pvcs {
|
||||
if _, err = cli.CoreV1().PersistentVolumeClaims(namespace.Name).Create(ctx, pvc, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("error creating PVC: %v", err)
|
||||
}
|
||||
defer cli.CoreV1().PersistentVolumeClaims(namespace.Name).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
deploy := buildTestDeployment(
"restart-pod",
namespace.Name,
4,
map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
func(deployment *appsv1.Deployment) {
deployment.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"}
deployment.Spec.Template.Spec.Containers[0].Args = []string{"-c", "sleep 1s && exit 1"}
},
)
deploy.Spec.Template.Spec.Volumes = tc.volumes

t.Logf("creating deployment %v", deploy.Name)
if _, err := cli.AppsV1().Deployments(deploy.Namespace).Create(ctx, deploy, metav1.CreateOptions{}); err != nil {
t.Fatalf("error creating deployment: %v", err)
}
defer cli.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{})

// wait for 3 restarts
waitPodRestartCount(ctx, cli, namespace.Name, t, 3)

rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("unable to initialize server: %v\n", err)
}
rs.Client, rs.EventClient, rs.DefaultFeatureGates = cli, cli, initFeatureGates()
preRunNames := sets.NewString(getCurrentPodNames(ctx, cli, namespace.Name, t)...)

// deploy the descheduler with the configured policy
policycm, err := deschedulerPolicyConfigMap(tc.policy)
if err != nil {
t.Fatalf("Error creating %q CM: %v", policycm.Name, err)
}

t.Logf("creating %q policy CM with PodsWithPVC protection enabled...", policycm.Name)
if _, err = cli.CoreV1().ConfigMaps(policycm.Namespace).Create(
ctx, policycm, metav1.CreateOptions{},
); err != nil {
t.Fatalf("error creating %q CM: %v", policycm.Name, err)
}

defer func() {
t.Logf("deleting %q CM...", policycm.Name)
if err := cli.CoreV1().ConfigMaps(policycm.Namespace).Delete(
ctx, policycm.Name, metav1.DeleteOptions{},
); err != nil {
t.Fatalf("unable to delete %q CM: %v", policycm.Name, err)
}
}()

desdep := deschedulerDeployment(namespace.Name)
t.Logf("creating descheduler deployment %v", desdep.Name)
if _, err := cli.AppsV1().Deployments(desdep.Namespace).Create(
ctx, desdep, metav1.CreateOptions{},
); err != nil {
t.Fatalf("error creating %q deployment: %v", desdep.Name, err)
}

deschedulerPodName := ""
defer func() {
if deschedulerPodName != "" {
printPodLogs(ctx, t, cli, deschedulerPodName)
}

t.Logf("deleting %q deployment...", desdep.Name)
if err := cli.AppsV1().Deployments(desdep.Namespace).Delete(
ctx, desdep.Name, metav1.DeleteOptions{},
); err != nil {
t.Fatalf("unable to delete %q deployment: %v", desdep.Name, err)
}

waitForPodsToDisappear(ctx, t, cli, desdep.Labels, desdep.Namespace)
}()

t.Logf("waiting for the descheduler pod running")
deschedulerPods := waitForPodsRunning(ctx, t, cli, desdep.Labels, 1, desdep.Namespace)
if len(deschedulerPods) != 0 {
deschedulerPodName = deschedulerPods[0].Name
}

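// Evictions are counted indirectly: any pod name that was present before the
// descheduler ran but is missing now is treated as evicted (replacement pods
// get new names).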
if err := wait.PollUntilContextTimeout(
ctx, 5*time.Second, time.Minute, true,
func(ctx context.Context) (bool, error) {
podList, err := cli.CoreV1().Pods(namespace.Name).List(
ctx, metav1.ListOptions{},
)
if err != nil {
t.Fatalf("error listing pods: %v", err)
}

names := []string{}
for _, item := range podList.Items {
names = append(names, item.Name)
}

currentRunNames := sets.NewString(names...)
actualEvictedPod := preRunNames.Difference(currentRunNames)
actualEvictedPodCount := uint(actualEvictedPod.Len())
if actualEvictedPodCount < tc.expectedEvictedPodCount {
t.Logf(
"expecting %v number of pods evicted, got %v instead",
tc.expectedEvictedPodCount, actualEvictedPodCount,
)
return false, nil
}

return true, nil
},
); err != nil {
t.Fatalf("error waiting for descheduler running: %v", err)
}

waitForTerminatingPodsToDisappear(ctx, t, cli, namespace.Name)
})
}
}
@@ -29,10 +29,13 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clientgotesting "k8s.io/client-go/testing"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
utilptr "k8s.io/utils/ptr"
)
@@ -89,6 +92,25 @@ func BuildTestPDB(name, appLabel string) *policyv1.PodDisruptionBudget {
return pdb
}

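// BuildTestPVC creates a test PVC with the given name and storage class.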
func BuildTestPVC(name, storageClass string) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &storageClass,
Resources: v1.VolumeResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("1Gi"),
},
},
},
}
return pvc
}

// BuildPodMetrics creates a test podmetrics with given parameters.
func BuildPodMetrics(name string, millicpu, mem int64) *v1beta1.PodMetrics {
return &v1beta1.PodMetrics{
@@ -230,11 +252,30 @@ func SetNormalOwnerRef(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = GetNormalPodOwnerRefList()
}

// SetMirrorPodAnnotation sets the given pod's annotations to mirror pod annotations
func SetMirrorPodAnnotation(pod *v1.Pod) {
pod.Annotations = GetMirrorPodAnnotation()
}

// SetPodPriority sets the given pod's priority
func SetPodPriority(pod *v1.Pod, priority int32) {
pod.Spec.Priority = &priority
}

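// SetHostPathEmptyDirVolumeSource sets a sample volume with both HostPath and
// EmptyDir volume sources on the given pod.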
func SetHostPathEmptyDirVolumeSource(pod *v1.Pod) {
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
}

// SetNodeUnschedulable sets the given node unschedulable
func SetNodeUnschedulable(node *v1.Node) {
node.Spec.Unschedulable = true
@@ -322,3 +363,18 @@ func PodWithPodAntiAffinity(inputPod *v1.Pod, labelKey, labelValue string) *v1.P
inputPod.Labels = map[string]string{labelKey: labelValue}
return inputPod
}

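// RegisterEvictedPodsCollector prepends a reactor to the fake clientset that records,
// in evictedPods, the name of every pod for which an eviction is created. It always
// falls through to the default reactor so the eviction itself is still processed.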
func RegisterEvictedPodsCollector(fakeClient *fake.Clientset, evictedPods *[]string) {
fakeClient.PrependReactor("create", "pods", func(action clientgotesting.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(clientgotesting.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policyv1.Eviction); matched {
*evictedPods = append(*evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
})
}

13 vendor/k8s.io/component-helpers/resource/OWNERS generated vendored Normal file
@@ -0,0 +1,13 @@
# See the OWNERS docs at https://go.k8s.io/owners

options:
no_parent_owners: true
approvers:
- api-approvers
reviewers:
- sig-node-reviewers
- sig-scheduling
labels:
- sig/node
- sig/scheduling
- kind/api-change
455 vendor/k8s.io/component-helpers/resource/helpers.go generated vendored Normal file
@@ -0,0 +1,455 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)

// ContainerType signifies container type
type ContainerType int

const (
// Containers is for normal containers
Containers ContainerType = 1 << iota
// InitContainers is for init containers
InitContainers
)

// PodResourcesOptions controls the behavior of PodRequests and PodLimits.
type PodResourcesOptions struct {
// Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits
// functions. All existing values in Reuse will be lost.
Reuse v1.ResourceList
// UseStatusResources indicates whether resources reported by the PodStatus should be considered
// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
// feature is not enabled.
UseStatusResources bool
// ExcludeOverhead controls if pod overhead is excluded from the calculation.
ExcludeOverhead bool
// ContainerFn is called with the effective resources required for each container within the pod.
ContainerFn func(res v1.ResourceList, containerType ContainerType)
// NonMissingContainerRequests if provided will replace any missing container level requests for the specified resources
// with the given values. If the requests for those resources are explicitly set, even if zero, they will not be modified.
NonMissingContainerRequests v1.ResourceList
// SkipPodLevelResources controls whether pod-level resources should be skipped
// from the calculation. If pod-level resources are not set in PodSpec,
// pod-level resources will always be skipped.
SkipPodLevelResources bool
// SkipContainerLevelResources controls whether container-level resources should be
// skipped from the calculation.
SkipContainerLevelResources bool
}

var supportedPodLevelResources = sets.New(v1.ResourceCPU, v1.ResourceMemory)

func SupportedPodLevelResources() sets.Set[v1.ResourceName] {
return supportedPodLevelResources.Clone().Insert(v1.ResourceHugePagesPrefix)
}

// IsSupportedPodLevelResource checks if a given resource is supported by pod-level
// resource management through the PodLevelResources feature. Returns true if
// the resource is supported.
func IsSupportedPodLevelResource(name v1.ResourceName) bool {
return supportedPodLevelResources.Has(name) || strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix)
}

// IsPodLevelResourcesSet checks whether pod-level resources are set. It returns true
// if the Requests or Limits map contains at least one supported pod-level resource.
func IsPodLevelResourcesSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}

if (len(pod.Spec.Resources.Requests) + len(pod.Spec.Resources.Limits)) == 0 {
return false
}

for resourceName := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}

for resourceName := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}

return false
}

// IsPodLevelRequestsSet checks if pod-level requests are set. It returns true if
// Requests map is non-empty.
func IsPodLevelRequestsSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}

if len(pod.Spec.Resources.Requests) == 0 {
return false
}

for resourceName := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}

return false
}

// IsPodLevelLimitsSet checks if pod-level limits are set. It returns true if
// Limits map is non-empty and contains at least one supported pod-level resource.
func IsPodLevelLimitsSet(pod *v1.Pod) bool {
if pod.Spec.Resources == nil {
return false
}

if len(pod.Spec.Resources.Limits) == 0 {
return false
}

for resourceName := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
return true
}
}

return false
}

// PodRequests computes the total pod requests per the PodResourcesOptions supplied.
// If PodResourcesOptions is nil, then the requests are returned including pod overhead.
// If the PodLevelResources feature is enabled AND the pod-level resources are set,
// those pod-level values are used in calculating Pod Requests.
// The computation is part of the API and must be reviewed as an API change.
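// Example (illustrative): the effective scheduling footprint of a pod can be read as
//
//	reqs := PodRequests(pod, PodResourcesOptions{})
//	cpu := reqs[v1.ResourceCPU]
//
// which includes pod overhead and, when set, supported pod-level resource values.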
func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
reqs := v1.ResourceList{}
if !opts.SkipContainerLevelResources {
reqs = AggregateContainerRequests(pod, opts)
}

if !opts.SkipPodLevelResources && IsPodLevelRequestsSet(pod) {
for resourceName, quantity := range pod.Spec.Resources.Requests {
if IsSupportedPodLevelResource(resourceName) {
reqs[resourceName] = quantity
}
}
}

// Add overhead for running a pod to the sum of requests if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
addResourceList(reqs, pod.Spec.Overhead)
}

return reqs
}

// AggregateContainerRequests computes the total resource requests of all the containers
// in a pod. This computation follows the formula defined in the KEP for sidecar
// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for more details.
func AggregateContainerRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
reqs := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
}

for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = determineContainerReqs(pod, &container, cs)
}
}

if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}

if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, Containers)
}

addResourceList(reqs, containerReqs)
}

restartableInitContainerReqs := v1.ResourceList{}
initContainerReqs := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
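// For example (illustrative): with regular containers requesting 200m CPU in total,
// a restartable (sidecar) init container requesting 100m, and a regular init
// container requesting 300m, the aggregated requests come out to
// max(200m+100m, 100m+300m) = 400m.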
for _, container := range pod.Spec.InitContainers {
containerReqs := container.Resources.Requests
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerReqs = determineContainerReqs(pod, &container, cs)
}
}
}

if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}

if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
// and add them to the resulting cumulative container requests
addResourceList(reqs, containerReqs)

// track our cumulative restartable init container resources
addResourceList(restartableInitContainerReqs, containerReqs)
containerReqs = restartableInitContainerReqs
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerReqs)
addResourceList(tmp, restartableInitContainerReqs)
containerReqs = tmp
}

if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, InitContainers)
}
maxResourceList(initContainerReqs, containerReqs)
}

maxResourceList(reqs, initContainerReqs)
return reqs
}

// determineContainerReqs will return a copy of the container requests based on if resizing is feasible or not.
func determineContainerReqs(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
return max(cs.Resources.Requests, cs.AllocatedResources)
}
return max(container.Resources.Requests, cs.Resources.Requests, cs.AllocatedResources)
}

// determineContainerLimits will return a copy of the container limits based on if resizing is feasible or not.
func determineContainerLimits(pod *v1.Pod, container *v1.Container, cs *v1.ContainerStatus) v1.ResourceList {
if IsPodResizeInfeasible(pod) {
return cs.Resources.Limits.DeepCopy()
}
return max(container.Resources.Limits, cs.Resources.Limits)
}

// IsPodResizeInfeasible returns true if the pod condition PodResizePending is set to infeasible.
func IsPodResizeInfeasible(pod *v1.Pod) bool {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodResizePending {
return condition.Reason == v1.PodReasonInfeasible
}
}
return false
}

// IsPodResizeDeferred returns true if the pod condition PodResizePending is set to deferred.
func IsPodResizeDeferred(pod *v1.Pod) bool {
for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodResizePending {
return condition.Reason == v1.PodReasonDeferred
}
}
return false
}

// applyNonMissing will return a copy of the given resource list with any missing values replaced by the nonMissing values
func applyNonMissing(reqs v1.ResourceList, nonMissing v1.ResourceList) v1.ResourceList {
cp := v1.ResourceList{}
for k, v := range reqs {
cp[k] = v.DeepCopy()
}

for k, v := range nonMissing {
if _, found := reqs[k]; !found {
rk := cp[k]
rk.Add(v)
cp[k] = rk
}
}
return cp
}

// PodLimits computes the pod limits per the PodResourcesOptions supplied. If PodResourcesOptions is nil, then
// the limits are returned including pod overhead for any non-zero limits. The computation is part of the API and must be reviewed
// as an API change.
func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := AggregateContainerLimits(pod, opts)
if !opts.SkipPodLevelResources && IsPodLevelResourcesSet(pod) {
for resourceName, quantity := range pod.Spec.Resources.Limits {
if IsSupportedPodLevelResource(resourceName) {
limits[resourceName] = quantity
}
}
}

// Add overhead to non-zero limits if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
for name, quantity := range pod.Spec.Overhead {
if value, ok := limits[name]; ok && !value.IsZero() {
value.Add(quantity)
limits[name] = value
}
}
}

return limits
}

// AggregateContainerLimits computes the aggregated resource limits of all the containers
// in a pod. This computation follows the formula defined in the KEP for sidecar
// containers. See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#resources-calculation-for-scheduling-and-pod-admission
// for more details.
func AggregateContainerLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := reuseOrClearResourceList(opts.Reuse)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses)+len(pod.Status.InitContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
for i := range pod.Status.InitContainerStatuses {
containerStatuses[pod.Status.InitContainerStatuses[i].Name] = &pod.Status.InitContainerStatuses[i]
}
}

for _, container := range pod.Spec.Containers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerLimits = determineContainerLimits(pod, &container, cs)
}
}

if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, Containers)
}
addResourceList(limits, containerLimits)
}

restartableInitContainerLimits := v1.ResourceList{}
initContainerLimits := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
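// Limits follow the same aggregation as requests above: restartable init containers
// add to the running total, each non-restartable init container is evaluated together
// with the sidecars started before it, and the maximum wins.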
for _, container := range pod.Spec.InitContainers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
containerLimits = determineContainerLimits(pod, &container, cs)
}
}
}

// Is the init container marked as a restartable init container?
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
addResourceList(limits, containerLimits)

// track our cumulative restartable init container resources
addResourceList(restartableInitContainerLimits, containerLimits)
containerLimits = restartableInitContainerLimits
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerLimits)
addResourceList(tmp, restartableInitContainerLimits)
containerLimits = tmp
}

if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, InitContainers)
}
maxResourceList(initContainerLimits, containerLimits)
}

maxResourceList(limits, initContainerLimits)
return limits
}

// addResourceList adds the resources in newList to list.
func addResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
list[name] = value
}
}
}

// maxResourceList sets list to the greater of list/newList for every resource in newList
func maxResourceList(list, newList v1.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
list[name] = quantity.DeepCopy()
}
}
}

// max returns the result of max(a, b...) for each named resource and is only used if we can't
// accumulate into an existing resource list
func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList {
var result v1.ResourceList
if a != nil {
result = a.DeepCopy()
} else {
result = v1.ResourceList{}
}
for _, other := range b {
maxResourceList(result, other)
}
return result
}

// reuseOrClearResourceList is a helper for avoiding excessive allocations of
// resource lists within the inner loop of resource calculations.
func reuseOrClearResourceList(reuse v1.ResourceList) v1.ResourceList {
if reuse == nil {
return make(v1.ResourceList, 4)
}
for k := range reuse {
delete(reuse, k)
}
return reuse
}
1 vendor/modules.txt vendored
@@ -1300,6 +1300,7 @@ k8s.io/component-base/zpages/httputil
k8s.io/component-base/zpages/statusz
# k8s.io/component-helpers v0.34.0
## explicit; go 1.24.0
k8s.io/component-helpers/resource
k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
# k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f