Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)
Compare commits
189 Commits
SHA1:
8b0744c5b2, 6e30321989, b094acb572, 9f15e02245, 3bf40c830a, c9c03ee536, f19a297d64, 2c005600cc, b35e93ec7a, 4d6a0f1c0e,
73432b788c, 33868c44df, 4989cc3b6c, ab6a3ca2d6, fdd69106a3, 0f1890e5cd, ed6a133449, 0b505946bf, dbe4423749, a300009b5d,
9fa48cd97e, 0cf1fc906e, 4e4c5f79fb, 8abb3509f9, 3eece465fb, 33a747096b, f6fe8fd0bd, 29c0a90998, 640b675e86, c0c26e762b,
91e5e06b5f, df7791fafa, cbade38d23, 1e0b1a9840, cb0c1b660d, daaa3a277e, 683cd7f794, 2189fe4479, e4c361d902, de7cec0640,
48690989da, 601f213b4f, 8e70190c8a, a3146a1705, 55a0812ae6, f3569b5fe2, d2bd573cdb, 95ef2bbec3, 355cff67c1, 9220a1c009,
b60a3fcfeb, ab467a5dd2, f66efaf8db, 0c9750cc7f, f23967a88e, a6e75fe0bd, 9b5026314f, c56a408b2c, fc1b54318f, da862a5698,
eb6c325553, f8e128d862, a2a45db6de, d8084e8b39, b614c8bc7c, 9b41edd382, bc60a058ef, 546a39e88c, 578086ca8e, ea2eeccff4,
d0695abea9, e60f525ec6, 3362fec7b0, f240648df2, a818c01832, 44b59f9b1d, 9d16c28f43, f8afd679ed, db0df6c6ca, 18d0e4a540,
7657345079, d1118354c9, e26f6429a2, 686417b6de, 287d1b1573, 7ab36daaec, f2be3fd414, 9eefbf05cb, cfa6845a19, 8a2b2eb37c,
972d28108a, f294d953a3, 85837b1063, fadef326ff, f5060adcd1, 75880226c0, 0901cb18bf, 6fdee47cbc, ae15fed7e7, ebae217631,
d7178984df, 2253e9816c, cdbd101eae, 15551bb834, bdaff92c10, 9fea59821f, 2df11f837a, 0e2478ac41, ec33490314, 1c8ae64726,
fd7fcbddfe, 69e5c5a1ef, 8f1102b547, 9a57a37cc0, 1dd21ba507, 2ae79bee64, 32065f0caa, 8714397ba6, 47cc875fe8, d699454d5e,
a889e57768, 748495a022, c80556fc91, 17af29afe4, 8be82b008c, fc01793949, 22dfa5d559, 4b8e2076e9, 1c1b1a7207, 2675dbedef,
77a0693e0c, 79990946eb, 4671199be7, a82fc7b4e4, 2ac072e5da, ee5bc6991d, 0ec8581964, 2f7544344b, b08f1fa8b5, fca4a0970f,
b1c6e24f93, fda4c96937, 3ef05e9e7f, 4bbafe7c19, 9f5fc14410, b2bb8272af, 149a4c11c4, 2ce9d46b8c, 77ec804529, 25b9edae3c,
8da68695e1, b95380641f, 10d0ce0dfa, 667df9b606, 2c06a33d41, 4bd348d9b7, 82559025b1, a2c88582fa, a321a38328, 970b35d737,
364f467421, dd94f2ed93, 7f20b5c891, e63e159c04, b7697869f2, 19ced3d630, 82af9c6321, 37df42df7c, ed1efe436f, 749e81c51c,
dc2cf723bc, d7c12c5f00, a5f322521e, 52d226321b, bb5930eb21, 6c865fdf32, af1ffe7a15, 97c0044a74, c7f8670b11, 99472b6223,
492da1b8a9, 70f3619fad, 0d096edbee, e7980442ef, c2cf78a760, 5092595384, d513c5d9d7, ae7467fb27, b7a50fd772
26 .github/workflows/helm.yaml (vendored)

@@ -20,27 +20,35 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Set up Helm
uses: azure/setup-helm@v2.1
uses: azure/setup-helm@v4.2.0
with:
version: v3.9.2
version: v3.15.1

- uses: actions/setup-python@v3.1.2
- uses: actions/setup-python@v5.1.1
with:
python-version: 3.7
python-version: 3.12

- uses: actions/setup-go@v3
- uses: actions/setup-go@v5
with:
go-version: '1.21.5'
go-version-file: 'go.mod'

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
uses: helm/chart-testing-action@v2.6.1
with:
version: v3.7.0
version: v3.11.0

- name: Install Helm Unit Test Plugin
run: |
helm plugin install https://github.com/helm-unittest/helm-unittest

- name: Run Helm Unit Tests
run: |
helm unittest charts/descheduler --strict -d

- name: Run chart-testing (list-changed)
id: list-changed
10 .github/workflows/manifests.yaml (vendored)

@@ -7,16 +7,16 @@ jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.29.0"]
descheduler-version: ["v0.29.0"]
descheduler-api: ["v1alpha1", "v1alpha2"]
k8s-version: ["v1.31.0"]
descheduler-version: ["v0.31.0"]
descheduler-api: ["v1alpha2"]
manifest: ["deployment"]
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Create kind cluster
uses: helm/kind-action@v1.5.0
uses: helm/kind-action@v1.10.0
with:
node_image: kindest/node:${{ matrix.k8s-version }}
kubectl_version: ${{ matrix.k8s-version }}
9 .github/workflows/release.yaml (vendored)

@@ -5,6 +5,9 @@ on:
branches:
- release-*

permissions:
contents: write # allow actions to update gh-pages branch

jobs:
release:
runs-on: ubuntu-latest
@@ -20,12 +23,12 @@ jobs:
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

- name: Install Helm
uses: azure/setup-helm@v1
uses: azure/setup-helm@v4.2.0
with:
version: v3.7.0
version: v3.15.1

- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.1.0
uses: helm/chart-releaser-action@v1.6.0
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
2 .github/workflows/security.yaml (vendored)

@@ -22,7 +22,7 @@ jobs:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.21.5
FROM golang:1.22.5

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -23,6 +23,8 @@ FROM scratch

MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler

USER 1000

COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler
8 Makefile

@@ -26,12 +26,14 @@ ARCHS = amd64 arm arm64

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

GOLANGCI_VERSION := v1.55.2
GOLANGCI_VERSION := v1.61.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

GOFUMPT_VERSION := v0.4.0
GOFUMPT_VERSION := v0.7.0
HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)

GO_VERSION := $(shell (command -v jq > /dev/null && (go mod edit -json | jq -r .Go)) || (sed -En 's/^go (.*)$$/\1/p' go.mod))

# REGISTRY is the container registry to push
# into. The default is to push to the staging
# registry, not production.
@@ -134,7 +136,7 @@ gen:
./hack/update-docs.sh

gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.21.5 gen
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:$(GO_VERSION) gen

verify-gen:
./hack/verify-conversions.sh
98 README.md

@@ -2,7 +2,7 @@


<p align="left">
↖️ Click at the [bullet list icon] at the top left corner of the Readme visualization for the github generated table of contents.
↗️️ Click at the [bullet list icon] at the top right corner of the Readme visualization for the github generated table of contents.
</p>

<p align="center">
@@ -33,17 +33,15 @@ but relies on the default scheduler for that.
## ⚠️ Documentation Versions by Release

If you are using a published release of Descheduler (such as
`registry.k8s.io/descheduler/descheduler:v0.26.1`), follow the documentation in
`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
that version's release branch, as listed below:

|Descheduler Version|Docs link|
|---|---|
|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
|v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
|v0.27.x|[`release-1.27`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.27/README.md)|
|v0.26.x|[`release-1.26`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.26/README.md)|
|v0.25.x|[`release-1.25`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.25/README.md)|
|v0.24.x|[`release-1.24`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.24/README.md)|

The
[`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
@@ -95,17 +93,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.26.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.31' | kubectl apply -f -
```

Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.26.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.31' | kubectl apply -f -
```

Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.26.1' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.31' | kubectl apply -f -
```

## User Guide
@@ -114,8 +112,6 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy, Default Evictor and Strategy plugins

**⚠️ v1alpha1 configuration is still supported, but deprecated (and soon will be removed). Please consider migrating to v1alpha2 (described bellow). For previous v1alpha1 documentation go to [docs/deprecated/v1alpha1.md](docs/deprecated/v1alpha1.md) ⚠️**

The Descheduler Policy is configurable and includes default strategy plugins that can be enabled or disabled. It includes a common eviction configuration at the top level, as well as configuration from the Evictor plugin (Default Evictor, if not specified otherwise). Top-level configuration and Evictor plugin configuration are applied to all evictions.

### Top Level configuration
@@ -127,6 +123,7 @@ These are top level keys in the Descheduler Policy that you can use to configure
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
| `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |

### Evictor Plugin configuration (Default Evictor)

@@ -142,6 +139,8 @@ The Default Evictor Plugin is used by default for filtering pods before processi
|`labelSelector`|`metav1.LabelSelector`||(see [label filtering](#label-filtering))|
|`priorityThreshold`|`priorityThreshold`||(see [priority filtering](#priority-filtering))|
|`nodeFit`|`bool`|`false`|(see [node fit filtering](#node-fit-filtering))|
|`minReplicas`|`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
|`minPodAge`|`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |

### Example policy

@@ -157,6 +156,7 @@ kind: "DeschedulerPolicy"
nodeSelector: "node=node1" # you don't need to set this, if not set all will be processed
maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
profiles:
- name: ProfileName
pluginConfig:
@@ -166,6 +166,7 @@ profiles:
evictFailedBarePods: true
evictLocalStoragePods: true
nodeFit: true
minReplicas: 2
plugins:
# DefaultEvictor is enabled for both `filter` and `preEvictionFilter`
# filter:
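The policy excerpt above cuts off at the plugin lists. As a minimal sketch (values are illustrative and not taken from this diff), the two new Default Evictor thresholds from the parameter table could be set together like this:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          nodeFit: true
          minReplicas: 2     # skip pods whose owner (e.g. ReplicaSet) has fewer than 2 replicas
          minPodAge: "1h"    # skip pods created within the last hour (metav1.Duration)
    plugins:
      deschedule:
        enabled:
          - "RemovePodsViolatingNodeTaints"
```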
@@ -205,7 +206,7 @@ Balance Plugins: These plugins process all pods, or groups of pods, and determin
| [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint) |Balance|Evicts pods violating TopologySpreadConstraints|
| [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts) |Deschedule|Evicts pods having too many restarts|
| [PodLifeTime](#podlifetime) |Deschedule|Evicts pods that have exceeded a specified age limit|
| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons|
| [RemoveFailedPods](#removefailedpods) |Deschedule|Evicts pods with certain failed reasons and exit codes|

### RemoveDuplicates
@@ -500,18 +501,22 @@ key=value matches an excludedTaints entry, the taint will be ignored.
For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".

If a list of includedTaints is provided, a taint will be considered if and only if it matches an included key **or** key=value from the list. Otherwise it will be ignored. Leaving includedTaints unset will include any taint by default.

**Parameters:**

|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`includedTaints`|list(string)|
|`includePreferNoSchedule`|bool|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|

**Example:**

````yaml
Setting `excludedTaints`
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
@@ -526,7 +531,25 @@ profiles:
deschedule:
enabled:
- "RemovePodsViolatingNodeTaints"
````
```

Setting `includedTaints`
```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsViolatingNodeTaints"
args:
includedTaints:
- decommissioned=end-of-life # include only taints with key "decommissioned" and value "end-of-life"
- reserved # include all taints with key "reserved"
plugins:
deschedule:
enabled:
- "RemovePodsViolatingNodeTaints"
```

### RemovePodsViolatingTopologySpreadConstraint

@@ -637,20 +660,23 @@ profiles:
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

You can also specify `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `ImagePullBackOff`, `CreateContainerError`, `InvalidImageName`

If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.

**Parameters:**

|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`states`|list(string)|Only supported in v0.25+|
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||
| Name | Type | Notes |
|--------------------------------|---------------------------------------------------|--------------------------|
| `maxPodLifeTimeSeconds` | int | |
| `states` | list(string) | Only supported in v0.25+ |
| `includingInitContainers` | bool | Only supported in v0.31+ |
| `includingEphemeralContainers` | bool | Only supported in v0.31+ |
| `namespaces` | (see [namespace filtering](#namespace-filtering)) | |
| `labelSelector` | (see [label filtering](#label-filtering)) | |

**Example:**

@@ -673,10 +699,8 @@ profiles:
```
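The PodLifeTime example itself falls outside this hunk. As a sketch with illustrative values not taken from the diff, `maxPodLifeTimeSeconds`, `states`, and the new `includingInitContainers` flag could be combined like this:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "PodLifeTime"
        args:
          maxPodLifeTimeSeconds: 86400   # evict pods older than one day
          states:
            - "Pending"
            - "PodInitializing"
          includingInitContainers: true  # also match init container waiting reasons (v0.31+)
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
```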
### RemoveFailedPods

This strategy evicts pods that are in failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can provide optional parameters to filter by failed pods' and containters' `reasons`. and `exitCodes`. `exitCodes` apply to failed pods' containers with `terminated` state only. `reasons` and `exitCodes` can be expanded to include those of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
@@ -688,6 +712,7 @@ has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considere
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`exitCodes`|list(int32)|
|`includingInitContainers`|bool|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
@@ -704,6 +729,8 @@ profiles:
args:
reasons:
- "NodeAffinity"
exitCodes:
- 1
includingInitContainers: true
excludeOwnerKinds:
- "Job"
@@ -718,7 +745,7 @@ profiles:

### Namespace filtering

The following strategies accept a `namespaces` parameter which allows to specify a list of including, resp. excluding namespaces:
The following strategies accept a `namespaces` parameter which allows to specify a list of including and excluding namespaces respectively:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
@@ -728,11 +755,10 @@ The following strategies accept a `namespaces` parameter which allows to specify
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`

The following strategies accept a `evictableNamespaces` parameter which allows to specify a list of excluding namespaces:
The following strategies accept an `evictableNamespaces` parameter which allows to specify a list of excluding namespaces:
* `LowNodeUtilization` and `HighNodeUtilization` (Only filtered right before eviction)

For example with PodLifeTime:
In the following example with `PodLifeTime`, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.

```yaml
apiVersion: "descheduler/v1alpha2"
@@ -753,8 +779,7 @@ profiles:
- "PodLifeTime"
```

In the example `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
The similar holds for `exclude` field:
The similar holds for `exclude` field. The strategy gets executed over all namespaces but `namespace1` and `namespace2` in the following example.

```yaml
apiVersion: "descheduler/v1alpha2"
@@ -775,9 +800,7 @@ profiles:
- "PodLifeTime"
```

The strategy gets executed over all namespaces but `namespace1` and `namespace2`.

It's not allowed to compute `include` with `exclude` field.
It's not allowed to combine `include` with `exclude` field.

### Priority filtering

@@ -835,7 +858,7 @@ does not exist, descheduler won't create it and will throw an error.

### Label filtering

The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)
to filter pods by their labels:

* `PodLifeTime`
@@ -880,6 +903,7 @@ profiles:
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`
- Any `podAntiAffinity` between the pod and the pods on the other nodes

E.g.

@@ -901,7 +925,7 @@ profiles:
- "PodLifeTime"
```

Note that node fit filtering references the current pod spec, and not that of it's owner.
Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior as the descheduler is a "best-effort" mechanism.
@@ -915,7 +939,7 @@ When the descheduler decides to evict pods from a node, it employs the following
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or standalone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
* Pods associated with DaemonSets are never evicted.
* Pods associated with DaemonSets are never evicted (unless `evictDaemonSetPods: true` is set).
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.29.0
appVersion: 0.29.0
version: 0.31.0
appVersion: 0.31.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes

@@ -52,6 +52,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
@@ -84,6 +85,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
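As a small hedged sketch of how a couple of the chart values listed above might be set (the label value is illustrative, not taken from the diff):

```yaml
# values.yaml (sketch)
commonLabels:
  team: platform   # applied to every rendered resource
suspend: true      # sets spec.suspend on the descheduler CronJob
```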
@@ -1,7 +1,7 @@
Descheduler installed as a {{ .Values.kind }}.

{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.replicas 1.0}}
{{- if eq (.Values.replicas | int) 1 }}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}

@@ -24,6 +24,14 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{- end -}}

{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
@@ -87,8 +95,10 @@ Leader Election
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{- if .Values.leaderElection.resourceNamescape }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- end -}}
{{- end }}
{{- end }}
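A minimal values sketch exercising the two template changes above, the new `descheduler.namespace` helper and the corrected `resourceNamespace` key (the misspelled `resourceNamescape` is still honored), might look like this; the namespace name is illustrative:

```yaml
# values.yaml (sketch)
kind: Deployment
namespaceOverride: "descheduler-system"   # rendered via the descheduler.namespace helper
leaderElection:
  enabled: true
  resourceNamespace: "descheduler-system" # preferred spelling; resourceNamescape still works
```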
@@ -12,5 +12,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "descheduler.namespace" . }}
{{- end -}}

@@ -1,8 +1,9 @@
{{- if .Values.deschedulerPolicy }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
@@ -10,3 +11,4 @@ data:
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{- end }}
@@ -3,7 +3,7 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
@@ -51,6 +51,10 @@ spec:
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
@@ -77,14 +81,20 @@ spec:
args:
- --policy-config-file=/policy-dir/policy.yaml
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 16 }}
{{- end }}
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
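The reworked `cmdOptions` loop renders `--key=value` for set values and a bare `--key` for null ones. A sketch of how values map to container args (flag choices are illustrative; `v` is the default in the chart's values.yaml and `--dry-run` is a documented descheduler flag):

```yaml
# values.yaml (sketch)
cmdOptions:
  dry-run:   # null value, rendered as a bare flag: --dry-run
  v: 3       # non-null, rendered as --v=3
# resulting container args, after --policy-config-file=/policy-dir/policy.yaml:
#   - --dry-run
#   - --v=3
```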
@@ -3,11 +3,11 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt .Values.replicas 1.0}}
{{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
@@ -53,7 +53,11 @@ spec:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
@@ -63,8 +67,10 @@ spec:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
@@ -84,6 +90,10 @@ spec:
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
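Given the `gt (.Values.replicas | int) 1` guard above, running more than one replica requires leader election. A minimal values sketch (the interval is illustrative):

```yaml
# values.yaml (sketch)
kind: Deployment
replicas: 2
deschedulingInterval: 5m   # required when kind is Deployment
leaderElection:
  enabled: true            # without this the template fails for more than 1 replica
```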
@@ -6,7 +6,7 @@ metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "descheduler.namespace" . }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}

@@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}

@@ -14,7 +14,7 @@ spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
- {{ include "descheduler.namespace" . }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}

1 charts/descheduler/tests/.gitignore (vendored, Normal file)

@@ -0,0 +1 @@
__snapshot__
17 charts/descheduler/tests/cronjob_test.yaml (Normal file)

@@ -0,0 +1,17 @@
suite: Test Descheduler CronJob

templates:
- "*.yaml"

release:
name: descheduler

set:
kind: CronJob

tests:
- it: creates CronJob when kind is set
template: templates/cronjob.yaml
asserts:
- isKind:
of: CronJob
49 charts/descheduler/tests/deployment_test.yaml (Normal file)

@@ -0,0 +1,49 @@
suite: Test Descheduler Deployment

templates:
- "*.yaml"

release:
name: descheduler

set:
kind: Deployment

tests:
- it: creates Deployment when kind is set
template: templates/deployment.yaml
asserts:
- isKind:
of: Deployment

- it: enables leader-election
set:
leaderElection:
enabled: true
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect=true

- it: support leader-election resourceNamespace
set:
leaderElection:
enabled: true
resourceNamespace: test
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=test

- it: support legacy leader-election resourceNamescape
set:
leaderElection:
enabled: true
resourceNamescape: typo
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=typo
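Following the same helm-unittest format, a sketch of one more case (not part of this diff, names and values illustrative) that would pin down the new namespace override rendered through `descheduler.namespace`:

```yaml
# sketch: an extra deployment_test.yaml case
- it: renders into the overridden namespace
  set:
    kind: Deployment
    namespaceOverride: descheduler-system
  template: templates/deployment.yaml
  asserts:
    - equal:
        path: metadata.namespace
        value: descheduler-system
```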
@@ -39,6 +39,9 @@ podSecurityContext: {}
nameOverride: ""
fullnameOverride: ""

# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""

# labels that'll be applied to all resources
commonLabels: {}

@@ -70,7 +73,7 @@ leaderElection: {}
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamescape: "kube-system"
# resourceNamespace: "kube-system"

command:
- "/bin/descheduler"
@@ -79,14 +82,19 @@ cmdOptions:
v: 3

# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha1"
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

# deschedulerPolicy contains the policies the descheduler will execute.
# To use policies stored in an existing configMap use:
# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
# deschedulerPolicy: {}
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
# evictDaemonSetPods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
@@ -94,40 +102,47 @@ deschedulerPolicy:
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
strategies:
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
LowNodeUtilization:
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50
profiles:
- name: default
pluginConfig:
- name: DefaultEvictor
args:
ignorePvcPods: true
evictLocalStoragePods: true
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
podRestartThreshold: 100
includingInitContainers: true
- name: RemovePodsViolatingNodeAffinity
args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: LowNodeUtilization
args:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsHavingTooManyRestarts
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity

priorityClassName: system-cluster-critical

@@ -153,6 +168,13 @@ affinity: {}
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
# operator: 'Equal'
@@ -77,7 +77,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {

secureServing.DisableHTTP2 = !s.EnableHTTP2

ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
ctx, done := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM)

pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
@@ -117,9 +117,10 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
return err
klog.ErrorS(err, "failed to create tracer provider")
}
defer tracing.Shutdown(ctx)

// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
watch.DefaultChanSize = 100000
@@ -19,6 +19,7 @@ descheduler [flags]
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
--client-connection-qps float32 QPS to use for interacting with kubernetes apiserver.
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-http2-serving If true, HTTP2 serving will be disabled [default=false]
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run Execute descheduler in dry run mode.
--enable-http2 If http/2 should be enabled for the metrics and health check
@@ -35,6 +36,8 @@ descheduler [flags]
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--log-json-info-buffer-size quantity [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--log-text-info-buffer-size quantity [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-text-split-stream [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
@@ -48,8 +51,8 @@ descheduler [flags]
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
@@ -1,784 +0,0 @@
|
||||
[](https://goreportcard.com/report/sigs.k8s.io/descheduler)
|
||||

|
||||
|
||||
<p align="center">
|
||||
<img src="assets/logo/descheduler-stacked-color.png" width="40%" align="center" alt="descheduler">
|
||||
</p>
|
||||
|
||||
# Descheduler for Kubernetes
|
||||
|
||||
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
|
||||
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
|
||||
pod can or can not be scheduled, are guided by its configurable policy which comprises of set of
|
||||
rules, called predicates and priorities. The scheduler's decisions are influenced by its view of
|
||||
a Kubernetes cluster at that point of time when a new pod appears for scheduling.
|
||||
As Kubernetes clusters are very dynamic and their state changes over time, there may be desire
|
||||
to move already running pods to some other nodes for various reasons:
|
||||
|
||||
* Some nodes are under or over utilized.
|
||||
* The original scheduling decision does not hold true any more, as taints or labels are added to
|
||||
or removed from nodes, pod/node affinity requirements are not satisfied any more.
|
||||
* Some nodes failed and their pods moved to other nodes.
|
||||
* New nodes are added to clusters.
|
||||
|
||||
Consequently, there might be several pods scheduled on less desired nodes in a cluster.
|
||||
Descheduler, based on its policy, finds pods that can be moved and evicts them. Please
|
||||
note, in current implementation, descheduler does not schedule replacement of evicted pods
|
||||
but relies on the default scheduler for that.
|
||||
|
||||
Table of Contents
|
||||
=================
|
||||
<!-- toc -->
|
||||
- [Quick Start](#quick-start)
|
||||
- [Run As A Job](#run-as-a-job)
|
||||
- [Run As A CronJob](#run-as-a-cronjob)
|
||||
- [Run As A Deployment](#run-as-a-deployment)
|
||||
- [Install Using Helm](#install-using-helm)
|
||||
- [Install Using Kustomize](#install-using-kustomize)
|
||||
- [User Guide](#user-guide)
|
||||
- [Policy and Strategies](#policy-and-strategies)
|
||||
- [RemoveDuplicates](#removeduplicates)
|
||||
- [LowNodeUtilization](#lownodeutilization)
|
||||
- [HighNodeUtilization](#highnodeutilization)
|
||||
- [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
|
||||
- [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
|
||||
- [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
|
||||
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
|
||||
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
|
||||
- [PodLifeTime](#podlifetime)
|
||||
- [RemoveFailedPods](#removefailedpods)
|
||||
- [Filter Pods](#filter-pods)
|
||||
- [Namespace filtering](#namespace-filtering)
|
||||
- [Priority filtering](#priority-filtering)
|
||||
- [Label filtering](#label-filtering)
|
||||
- [Node Fit filtering](#node-fit-filtering)
|
||||
- [Pod Evictions](#pod-evictions)
|
||||
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
|
||||
- [High Availability](#high-availability)
|
||||
- [Configure HA Mode](#configure-ha-mode)
|
||||
- [Metrics](#metrics)
|
||||
- [Compatibility Matrix](#compatibility-matrix)
|
||||
- [Getting Involved and Contributing](#getting-involved-and-contributing)
|
||||
- [Communicating With Contributors](#communicating-with-contributors)
|
||||
- [Roadmap](#roadmap)
|
||||
- [Code of conduct](#code-of-conduct)
|
||||
<!-- /toc -->
|
||||
|
||||
## Quick Start
|
||||
|
||||
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster. It has the
|
||||
advantage of being able to be run multiple times without needing user intervention.
|
||||
The descheduler pod is run as a critical pod in the `kube-system` namespace to avoid
|
||||
being evicted by itself or by the kubelet.
|
||||
|
||||
### Run As A Job
|
||||
|
||||
```
|
||||
kubectl create -f kubernetes/base/rbac.yaml
|
||||
kubectl create -f kubernetes/base/configmap.yaml
|
||||
kubectl create -f kubernetes/job/job.yaml
|
||||
```
|
||||
|
||||
### Run As A CronJob
|
||||
|
||||
```
|
||||
kubectl create -f kubernetes/base/rbac.yaml
|
||||
kubectl create -f kubernetes/base/configmap.yaml
|
||||
kubectl create -f kubernetes/cronjob/cronjob.yaml
|
||||
```
|
||||
|
||||
### Run As A Deployment
|
||||
|
||||
```
|
||||
kubectl create -f kubernetes/base/rbac.yaml
|
||||
kubectl create -f kubernetes/base/configmap.yaml
|
||||
kubectl create -f kubernetes/deployment/deployment.yaml
|
||||
```
|
||||
|
||||
### Install Using Helm
|
||||
|
||||
Starting with release v0.18.0 there is an official helm chart that can be used to install the
|
||||
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.
|
||||
|
||||
The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
|
||||
|
||||
### Install Using Kustomize
|
||||
|
||||
You can use kustomize to install descheduler.
|
||||
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/cmd/build/) for detailed instructions.
|
||||
|
||||
Run As A Job
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.29.0' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A CronJob
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.29.0' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A Deployment
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.29.0' | kubectl apply -f -
|
||||
```
|
||||
|
||||
## User Guide
|
||||
|
||||
See the [user guide](docs/user-guide.md) in the `/docs` directory.

## Policy and Strategies

Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.

The policy includes a common configuration that applies to all the strategies:

| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limits the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | sets whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `evictFailedBarePods` | `false` | allows eviction of pods without owner references that are in the failed phase |

As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.

**Policy:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
  ...
```

The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.

![Strategies diagram](strategies_diagram.png)

### RemoveDuplicates

This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue can happen
if some nodes went down for any reason and their pods moved to other nodes, leaving more than one
pod associated with the same RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy can be enabled to evict those duplicate pods.

It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. To exclude pods created by
Deployments, add `ReplicaSet` to `excludeOwnerKinds`.

**Parameters:**

|Name|Type|
|---|---|
|`excludeOwnerKinds`|list(string)|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveDuplicates":
    enabled: true
    params:
      removeDuplicates:
        excludeOwnerKinds:
        - "ReplicaSet"
```

### LowNodeUtilization

This strategy finds nodes that are under utilized and evicts pods, if possible, from other nodes
in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.

The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage (the percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node).

If a node's usage is below the threshold for all resources (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered for computing node resource utilization.

There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from where pods could be evicted. If a node's usage is above `targetThresholds` for any resource (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between the thresholds, `thresholds` and `targetThresholds`, is
considered appropriately utilized and is not considered for eviction. The threshold `targetThresholds`
can also be configured for cpu, memory, and number of pods in terms of percentage.

These thresholds, `thresholds` and `targetThresholds`, can be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`); it will abort if the number of `underutilized nodes` or `overutilized nodes` is zero.

Additionally, the strategy accepts a `useDeviationThresholds` parameter.
If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
`thresholds` will be subtracted from the mean among all nodes and `targetThresholds` will be added to the mean.
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
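
For illustration, a minimal policy sketch with deviation-based thresholds could look like the following; the 10% values are placeholders, and this assumes `useDeviationThresholds` sits alongside `thresholds` under `nodeResourceUtilizationThresholds`.

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        useDeviationThresholds: true
        # underutilized: usage below (mean - 10%) for all listed resources
        thresholds:
          "cpu": 10
          "memory": 10
        # overutilized: usage above (mean + 10%) for any listed resource
        targetThresholds:
          "cpu": 10
          "memory": 10
```

With this configuration, a node whose utilization is more than 10 percentage points below the cluster mean is treated as underutilized, and one more than 10 points above the mean as overutilized.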

**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.

**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`useDeviationThresholds`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
```

Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
  If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
  and will not be used to compute a node's usage unless they are specified in both `thresholds` and `targetThresholds` explicitly.
* Neither `thresholds` nor `targetThresholds` can be nil, and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\].
* The percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.

There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
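
A minimal sketch showing where `numberOfNodes` is placed (the value 3 and the threshold percentages are placeholders, and the nesting under `nodeResourceUtilizationThresholds` is an assumption based on the parameter table above):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        # only act when more than 3 nodes are underutilized
        numberOfNodes: 3
        thresholds:
          "cpu": 20
          "memory": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
```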

### HighNodeUtilization

This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down scaling of under utilized nodes.
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.
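
For reference, enabling `MostAllocated` scoring is done on the kube-scheduler side through its configuration file; the fragment below is a sketch assuming the `NodeResourcesFit` plugin's `scoringStrategy` arguments from the Kubernetes scheduler configuration docs, with placeholder weights (the `apiVersion` varies by Kubernetes version).

```yaml
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
profiles:
- schedulerName: default-scheduler
  pluginConfig:
  - name: NodeResourcesFit
    args:
      scoringStrategy:
        # prefer nodes that are already highly utilized, so new pods pack onto fewer nodes
        type: MostAllocated
        resources:
        - name: cpu
          weight: 1
        - name: memory
          weight: 1
```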

The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.

If a node's usage is below the threshold for all resources (cpu, memory, number of pods and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered for computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.

The `thresholds` param can be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated on appropriately utilized nodes.
The strategy will abort if the number of `underutilized nodes` or `appropriately utilized nodes` is zero.

**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.

**Parameters:**

|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
```

Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute a node's usage unless they are specified in `thresholds` explicitly.
* `thresholds` can not be nil.
* The valid range of the resource's percentage value is \[0, 100\].

There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of under utilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.

### RemovePodsViolatingInterPodAntiAffinity

This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
if there is podA on a node, and podB and podC (running on the same node) have anti-affinity rules which prohibit
them from running on the same node as podA, then podA will be evicted from the node so that podB and podC can run. This
issue can happen when the anti-affinity rules for podB and podC are created while they are already running on the
node.

**Parameters:**

|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
```

### RemovePodsViolatingNodeAffinity

This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify the
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod but tells the kubelet to ignore it
in case the node changes over time and no longer satisfies the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution` and evicts pods from nodes
that no longer satisfy their node affinity.

For example, there is podA scheduled on nodeA which satisfies the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time nodeA stops satisfying the rule. When the strategy gets
executed and there is another node available that satisfies the node affinity rule,
podA gets evicted from nodeA.

**Parameters:**

|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
      - "requiredDuringSchedulingIgnoredDuringExecution"
```

### RemovePodsViolatingNodeTaints

This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example, suppose there is a
pod "podA" with a toleration for the taint ``key=value:NoSchedule``, scheduled and running on the tainted
node. If the node's taint is subsequently updated or removed such that the pod's tolerations no longer satisfy
the node's taints, the pod will be evicted.

Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
key=value matches an excludedTaints entry, the taint will be ignored.

For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".

**Parameters:**

|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      excludedTaints:
      - dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
      - reserved # exclude all taints with key "reserved"
```

### RemovePodsViolatingTopologySpreadConstraint

This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.

By default, this strategy only deals with hard constraints; setting the parameter `includeSoftConstraints` to `true` will
include soft constraints as well.

Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.

**Parameters:**

|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingTopologySpreadConstraint":
    enabled: true
    params:
      includeSoftConstraints: false
```

### RemovePodsHavingTooManyRestarts

This strategy makes sure that pods having too many restarts are removed from nodes. For example, if a pod with an EBS/PD volume
can't get the volume/disk attached to the instance, the pod should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.

**Parameters:**

|Name|Type|
|---|---|
|`podRestartThreshold`|int|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThreshold: 100
        includingInitContainers: true
```

### PodLifeTime

This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.

You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`

If a value for `states` or `podStatusPhases` is not specified,
pods in any state (even `Running`) are considered for eviction.

**Parameters:**

|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+. Use `states` instead.|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
        states:
        - "Pending"
        - "PodInitializing"
```

### RemoveFailedPods

This strategy evicts pods that are in the failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict only pods that are older than the specified number of seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds`; if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.

**Parameters:**

|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|

**Example:**

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemoveFailedPods":
    enabled: true
    params:
      failedPods:
        reasons:
        - "NodeAffinity"
        includingInitContainers: true
        excludeOwnerKinds:
        - "Job"
        minPodLifetimeSeconds: 3600
```

## Filter Pods

### Namespace filtering

The following strategies accept a `namespaces` parameter which allows specifying a list of namespaces to include or exclude:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
* `LowNodeUtilization` and `HighNodeUtilization` (only filtered right before eviction)

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        include:
        - "namespace1"
        - "namespace2"
```

In the example above, `PodLifeTime` gets executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        exclude:
        - "namespace1"
        - "namespace2"
```

Here the strategy gets executed over all namespaces except `namespace1` and `namespace2`.

It is not allowed to combine the `include` and `exclude` fields.

### Priority filtering

All strategies are able to configure a priority threshold; only pods under the threshold can be evicted. You can
specify this threshold by setting `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold). By default, this threshold
is set to the value of the `system-cluster-critical` priority class.

Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.

E.g.

Setting `thresholdPriority`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriority: 10000
```

Setting `thresholdPriorityClassName`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      thresholdPriorityClassName: "priorityclass1"
```

Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
does not exist, the descheduler won't create it and will throw an error.
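
For completeness, a `PriorityClass` like the following could back the `thresholdPriorityClassName` example above; the name and value are placeholders chosen to match that example.

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: priorityclass1
value: 10000            # pods with priority below this value become eligible for eviction
globalDefault: false
description: "Priority threshold referenced by the descheduler's priority filtering."
```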

### Label filtering

The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
to filter pods by their labels:

* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`

This allows running strategies only among the pods the descheduler is interested in.

For example:

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      labelSelector:
        matchLabels:
          component: redis
        matchExpressions:
        - {key: tier, operator: In, values: [cache]}
        - {key: environment, operator: NotIn, values: [dev]}
```

### Node Fit filtering

The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`

If set to `true`, the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`

E.g.

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeFit: true
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
```

Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior, as the descheduler is a "best-effort" mechanism.

Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.

@@ -4,24 +4,12 @@ Starting with descheduler release v0.10.0 container images are available in the

Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |

Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
starting with descheduler release v0.20.0 use the below process to download the official descheduler
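
As an illustrative sketch only (the image tag and the default kind cluster name are assumptions, not the project's documented procedure), pulling a single-arch image locally and loading it into kind typically looks like this:

```
docker pull registry.k8s.io/descheduler/descheduler:v0.31.0
kind load docker-image registry.k8s.io/descheduler/descheduler:v0.31.0
```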

go.mod
@@ -1,125 +1,116 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.21
|
||||
go 1.22.5
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.opentelemetry.io/otel v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
|
||||
go.opentelemetry.io/otel/sdk v1.21.0
|
||||
go.opentelemetry.io/otel/trace v1.21.0
|
||||
google.golang.org/grpc v1.60.1
|
||||
k8s.io/api v0.29.0
|
||||
k8s.io/apimachinery v0.29.0
|
||||
k8s.io/apiserver v0.29.0
|
||||
k8s.io/client-go v0.29.0
|
||||
k8s.io/code-generator v0.29.0
|
||||
k8s.io/component-base v0.29.0
|
||||
k8s.io/component-helpers v0.29.0
|
||||
k8s.io/klog/v2 v2.110.1
|
||||
k8s.io/utils v0.0.0-20231127182322-b307cd553661
|
||||
go.opentelemetry.io/otel v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
|
||||
go.opentelemetry.io/otel/sdk v1.28.0
|
||||
go.opentelemetry.io/otel/trace v1.28.0
|
||||
google.golang.org/grpc v1.65.0
|
||||
k8s.io/api v0.31.0
|
||||
k8s.io/apimachinery v0.31.0
|
||||
k8s.io/apiserver v0.31.0
|
||||
k8s.io/client-go v0.31.0
|
||||
k8s.io/code-generator v0.31.0
|
||||
k8s.io/component-base v0.31.0
|
||||
k8s.io/component-helpers v0.31.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/go-logr/logr v1.3.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.2.3 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/cel-go v0.17.7 // indirect
|
||||
github.com/google/cel-go v0.20.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.3.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.10 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.10 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.14 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.19.0 // indirect
|
||||
golang.org/x/crypto v0.16.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/net v0.19.0 // indirect
|
||||
golang.org/x/oauth2 v0.13.0 // indirect
|
||||
golang.org/x/sync v0.5.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/term v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
go.uber.org/zap v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/oauth2 v0.21.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.16.1 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
|
||||
k8s.io/kms v0.29.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
|
||||
k8s.io/kms v0.31.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
|
||||
|
||||
// CVE-2023-48795: https://github.com/kubernetes-sigs/descheduler/security/code-scanning/17
|
||||
// TODO: remove this replace once the upstream dependency is updated to v0.29.1
|
||||
k8s.io/api => k8s.io/api v0.0.0-20231220172311-84c476802242
|
||||
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b
|
||||
k8s.io/client-go => k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d
|
||||
)
|
||||
replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0

go.sum
@@ -1,103 +1,93 @@
|
||||
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
|
||||
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
|
||||
cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
|
||||
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
|
||||
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
|
||||
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
|
||||
github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
|
||||
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
|
||||
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
|
||||
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
|
||||
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
||||
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ=
|
||||
github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||
github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
|
||||
github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
|
||||
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
@@ -106,8 +96,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
|
||||
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@@ -120,8 +110,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
@@ -131,8 +119,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -142,33 +128,33 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
|
||||
github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
|
||||
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
|
||||
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
||||
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
|
||||
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
|
||||
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
|
||||
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
|
||||
@@ -177,153 +163,124 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
|
||||
go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
|
||||
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
|
||||
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
|
||||
go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
|
||||
go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
|
||||
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
|
||||
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
|
||||
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||
go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
|
||||
go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
|
||||
go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
|
||||
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
|
||||
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
|
||||
go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
|
||||
go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
|
||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
|
||||
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
|
||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY=
|
||||
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
|
||||
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0=
|
||||
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
|
||||
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
|
||||
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
@@ -333,42 +290,39 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.0.0-20231220172311-84c476802242 h1:0vL/RpEDBpP1Ib7D/+Gn1GcXRKoLfzHk39Bimd2kBNI=
|
||||
k8s.io/api v0.0.0-20231220172311-84c476802242/go.mod h1:77+m1u5YUTiZfSm1ad6sO9fCphn5EYI5cFzfE0VKOuY=
|
||||
k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b h1:Z+R5mA4fTuumBjSg84ZIY+W8Gi3zW50/iw58LL1tUwQ=
|
||||
k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b/go.mod h1:HcS5UNnaHsno0i/Q7QQQzqFd5WS+W26bysH+Ez3FLxA=
|
||||
k8s.io/apiserver v0.29.0 h1:Y1xEMjJkP+BIi0GSEv1BBrf1jLU9UPfAnnGGbbDdp7o=
|
||||
k8s.io/apiserver v0.29.0/go.mod h1:31n78PsRKPmfpee7/l9NYEv67u6hOL6AfcE761HapDM=
|
||||
k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d h1:Y5RdiizB/2/3NM2YcasfigRdLYEkSa0AslQLcUQGDMw=
|
||||
k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d/go.mod h1:1ou2dbm4LO33nvBBLNRvuzwYldF1e1DAjhtXzRCjF6o=
|
||||
k8s.io/code-generator v0.29.0 h1:2LQfayGDhaIlaamXjIjEQlCMy4JNCH9lrzas4DNW1GQ=
|
||||
k8s.io/code-generator v0.29.0/go.mod h1:5bqIZoCxs2zTRKMWNYqyQWW/bajc+ah4rh0tMY8zdGA=
|
||||
k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
|
||||
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
|
||||
k8s.io/component-helpers v0.29.0 h1:Y8W70NGeitKxWwhsPo/vEQbQx5VqJV+3xfLpP3V1VxU=
|
||||
k8s.io/component-helpers v0.29.0/go.mod h1:j2coxVfmzTOXWSE6sta0MTgNSr572Dcx68F6DD+8fWc=
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
|
||||
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
|
||||
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
|
||||
k8s.io/kms v0.29.0 h1:KJ1zaZt74CgvgV3NR7tnURJ/mJOKC5X3nwon/WdwgxI=
|
||||
k8s.io/kms v0.29.0/go.mod h1:mB0f9HLxRXeXUfHfn1A7rpwOlzXI1gIWu86z6buNoYA=
|
||||
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e h1:snPmy96t93RredGRjKfMFt+gvxuVAncqSAyBveJtr4Q=
|
||||
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
|
||||
k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI=
|
||||
k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y=
|
||||
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
|
||||
k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
|
||||
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
|
||||
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
|
||||
k8s.io/code-generator v0.31.0 h1:w607nrMi1KeDKB3/F/J4lIoOgAwc+gV9ZKew4XRfMp8=
|
||||
k8s.io/code-generator v0.31.0/go.mod h1:84y4w3es8rOJOUUP1rLsIiGlO1JuEaPFXQPA9e/K6U0=
|
||||
k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
|
||||
k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
|
||||
k8s.io/component-helpers v0.31.0 h1:jyRUKA+GX+q19o81k4x94imjNICn+e6Gzi6T89va1/A=
|
||||
k8s.io/component-helpers v0.31.0/go.mod h1:MrNIvT4iB7wXIseYSWfHUJB/aNUiFvbilp4qDfBQi6s=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.31.0 h1:KchILPfB1ZE+ka7223mpU5zeFNkmb45jl7RHnlImUaI=
|
||||
k8s.io/kms v0.31.0/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
@@ -20,6 +20,6 @@ function find_dirs_containing_comment_tags() {
|
||||
| LC_ALL=C sort -u \
|
||||
)
|
||||
|
||||
IFS=",";
|
||||
IFS=" ";
|
||||
printf '%s' "${array[*]}";
|
||||
}
|
||||
|
||||
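For context on the hunk above: "${array[*]}" joins the array elements with the first character of IFS, so switching IFS from "," to " " makes find_dirs_containing_comment_tags emit space-separated package paths, which is what the reworked generator invocations further below expect as positional arguments. A standalone sketch (not from the repo) showing the difference:

#!/bin/bash
# Hypothetical input; in the real script the array is built from grep results.
array=(pkg/api pkg/apis/componentconfig)

IFS=","; printf '%s\n' "${array[*]}"   # pkg/api,pkg/apis/componentconfig
IFS=" "; printf '%s\n' "${array[*]}"   # pkg/api pkg/apis/componentconfig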
26
hack/lib/go.sh
Normal file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2024 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# go::verify_version verifies the go version is supported by the project.
|
||||
# descheduler actively supports 3 Kubernetes versions, therefore 3 Go versions are supported.
|
||||
go::verify_version() {
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.20|go1.21|go1.22') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
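The array indexing above relies on the usual output shape of go version; for example (illustrative output, not from this diff):

$ go version
go version go1.22.4 linux/amd64
# GO_VERSION[0]="go"  GO_VERSION[1]="version"  GO_VERSION[2]="go1.22.4"  GO_VERSION[3]="linux/amd64"
# so the grep above tests GO_VERSION[2] against the supported go1.20/go1.21/go1.22 prefixes.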
@@ -6,5 +6,5 @@ go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/con
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
|
||||
--output-file-base zz_generated.conversion
|
||||
--output-file zz_generated.conversion.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")
|
||||
|
||||
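Because removed and added lines are interleaved in the hunk above, the resulting command is easier to read assembled. The code-generator bump in this change set replaces --input-dirs/--output-file-base with --output-file plus positional package arguments, so the new invocation reads roughly as follows (the same reshaping applies to the deepcopy-gen and defaulter-gen hunks below):

${OS_OUTPUT_BINPATH}/conversion-gen \
  --go-header-file "hack/boilerplate/boilerplate.go.txt" \
  --output-file zz_generated.conversion.go \
  $(find_dirs_containing_comment_tags "+k8s:conversion-gen=")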
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
--output-file zz_generated.deepcopy.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")
|
||||
|
||||
|
||||
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defa
|
||||
|
||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.defaults
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha2" \
|
||||
--output-file zz_generated.defaults.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")
|
||||
|
||||
@@ -20,13 +20,9 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
go::verify_version
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
|
||||
@@ -20,13 +20,9 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
|
||||
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
go::verify_version
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
|
||||
1147
keps/1397-evictions-in-background/README.md
Normal file
File diff suppressed because it is too large
16
keps/1397-evictions-in-background/kep.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
title: descheduler integration with evacuation API as an alternative to eviction API
|
||||
kep-number: 1397
|
||||
authors:
|
||||
- "@ingvagabund"
|
||||
owning-sig: sig-scheduling
|
||||
participating-sigs:
|
||||
- sig-apps
|
||||
status: provisional
|
||||
creation-date: 2024-04-14
|
||||
reviewers:
|
||||
- atiratree
|
||||
approvers:
|
||||
- TBD
|
||||
feature-gates:
|
||||
- TBD
|
||||
stage: alpha
|
||||
1211
keps/753-descheduling-framework/README.md
Normal file
File diff suppressed because it is too large
BIN
keps/753-descheduling-framework/framework-workflow-diagram.png
Normal file
Binary file not shown. (new image, 48 KiB)
29
keps/753-descheduling-framework/kep.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
title: Descheduling framework
|
||||
kep-number: 753
|
||||
authors:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
owning-sig: sig-scheduling
|
||||
status: provisional
|
||||
creation-date: 2024-04-08
|
||||
reviewers:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
- "@a7i"
|
||||
- "@knelasevero"
|
||||
approvers:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
- "@a7i"
|
||||
- "@knelasevero"
|
||||
#replaces:
|
||||
|
||||
# The target maturity stage in the current dev cycle for this KEP.
|
||||
stage: alpha
|
||||
|
||||
# The most recent milestone for which work toward delivery of this KEP has been
|
||||
# done. This can be the current (upcoming) milestone, if it is being actively
|
||||
# worked on.
|
||||
# latest-milestone: "v1.19"
|
||||
|
||||
# disable-supported: true
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.29.0
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.31.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
serviceAccountName: descheduler-sa
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.29.0
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.31.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
|
||||
@@ -14,7 +14,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.29.0
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.31.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
|
||||
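The three manifest hunks above only bump the image tag from v0.29.0 to v0.31.0. One way to confirm a cluster picked up the new tag after applying the updated manifest (the resource name and namespace here are assumptions; adjust to the actual install):

kubectl -n kube-system get deployment descheduler \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
# expected: registry.k8s.io/descheduler/descheduler:v0.31.0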
@@ -36,7 +36,7 @@ var (
|
||||
Name: "pods_evicted",
|
||||
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
}, []string{"result", "strategy", "namespace", "node"})
|
||||
}, []string{"result", "strategy", "profile", "namespace", "node"})
|
||||
|
||||
buildInfo = metrics.NewGauge(
|
||||
&metrics.GaugeOpts{
|
||||
|
||||
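The hunk above adds a "profile" label to the pods_evicted counter, so every call site now has to pass one extra label value. A minimal sketch of recording an eviction with the new label set (the variable name PodsEvicted and the concrete label values are assumptions, not taken from this diff):

// Label order must match the []string slice above:
// result, strategy, profile, namespace, node.
PodsEvicted.WithLabelValues(
	"success",          // result
	"RemoveFailedPods", // strategy (plugin name)
	"default-profile",  // profile (the newly added label)
	"kube-system",      // namespace of the evicted pod
	"worker-1",         // node the pod was running on
).Inc()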
@@ -38,13 +38,16 @@ type DeschedulerPolicy struct {
|
||||
|
||||
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
|
||||
MaxNoOfPodsToEvictPerNamespace *uint
|
||||
|
||||
// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
|
||||
MaxNoOfPodsToEvictTotal *uint
|
||||
}
|
||||
|
||||
// Namespaces carries a list of included/excluded namespaces
|
||||
// for which a given strategy is applicable
|
||||
type Namespaces struct {
|
||||
Include []string `json:"include"`
|
||||
Exclude []string `json:"exclude"`
|
||||
Include []string `json:"include,omitempty"`
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
|
||||
type (
|
||||
|
||||
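The new MaxNoOfPodsToEvictTotal field sits next to the existing per-node and per-namespace limits and caps the total number of evictions. A sketch of a v1alpha2 policy using it (the lower-camel-case key is assumed to mirror the existing limit fields; not taken from this diff):

apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
maxNoOfPodsToEvictPerNode: 10
maxNoOfPodsToEvictPerNamespace: 5
maxNoOfPodsToEvictTotal: 20   # assumed key name, mirroring the Go field above
profiles:
  - name: default
    pluginConfig:
      - name: "PodLifeTime"
        args:
          maxPodLifeTimeSeconds: 86400
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"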
@@ -1,271 +0,0 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
var (
|
||||
// pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
|
||||
// used for defaulting/converting typed PluginConfig Args.
|
||||
// Access via getPluginArgConversionScheme()
|
||||
|
||||
Scheme = runtime.NewScheme()
|
||||
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
|
||||
)
|
||||
|
||||
// evictorImpl implements the Evictor interface so plugins
|
||||
// can evict a pod without importing a specific pod evictor
|
||||
type evictorImpl struct {
|
||||
podEvictor *evictions.PodEvictor
|
||||
evictorFilter frameworktypes.EvictorPlugin
|
||||
}
|
||||
|
||||
var _ frameworktypes.Evictor = &evictorImpl{}
|
||||
|
||||
// Filter checks if a pod can be evicted
|
||||
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
|
||||
return ei.evictorFilter.Filter(pod)
|
||||
}
|
||||
|
||||
// PreEvictionFilter checks if pod can be evicted right before eviction
|
||||
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
return ei.evictorFilter.PreEvictionFilter(pod)
|
||||
}
|
||||
|
||||
// Evict evicts a pod (no pre-check performed)
|
||||
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
|
||||
return ei.podEvictor.EvictPod(ctx, pod, opts)
|
||||
}
|
||||
|
||||
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
|
||||
return ei.podEvictor.NodeLimitExceeded(node)
|
||||
}
|
||||
|
||||
// handleImpl implements the framework handle which gets passed to plugins
|
||||
type handleImpl struct {
|
||||
clientSet clientset.Interface
|
||||
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
evictor *evictorImpl
|
||||
}
|
||||
|
||||
var _ frameworktypes.Handle = &handleImpl{}
|
||||
|
||||
// ClientSet retrieves kube client set
|
||||
func (hi *handleImpl) ClientSet() clientset.Interface {
|
||||
return hi.clientSet
|
||||
}
|
||||
|
||||
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
|
||||
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
|
||||
return hi.getPodsAssignedToNodeFunc
|
||||
}
|
||||
|
||||
// SharedInformerFactory retrieves shared informer factory
|
||||
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
|
||||
return hi.sharedInformerFactory
|
||||
}
|
||||
|
||||
// Evictor retrieves evictor so plugins can filter and evict pods
|
||||
func (hi *handleImpl) Evictor() frameworktypes.Evictor {
|
||||
return hi.evictor
|
||||
}
|
||||
|
||||
func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
err := V1alpha1ToInternal(in, pluginregistry.PluginRegistry, out, s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func V1alpha1ToInternal(
|
||||
deschedulerPolicy *DeschedulerPolicy,
|
||||
registry pluginregistry.Registry,
|
||||
out *api.DeschedulerPolicy,
|
||||
s conversion.Scope,
|
||||
) error {
|
||||
var evictLocalStoragePods bool
|
||||
if deschedulerPolicy.EvictLocalStoragePods != nil {
|
||||
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
|
||||
}
|
||||
|
||||
evictBarePods := false
|
||||
if deschedulerPolicy.EvictFailedBarePods != nil {
|
||||
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
|
||||
if evictBarePods {
|
||||
klog.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
}
|
||||
}
|
||||
|
||||
evictSystemCriticalPods := false
|
||||
if deschedulerPolicy.EvictSystemCriticalPods != nil {
|
||||
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
|
||||
if evictSystemCriticalPods {
|
||||
klog.V(1).Info("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
|
||||
}
|
||||
}
|
||||
|
||||
ignorePvcPods := false
|
||||
if deschedulerPolicy.IgnorePVCPods != nil {
|
||||
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
|
||||
}
|
||||
|
||||
var profiles []api.DeschedulerProfile
|
||||
|
||||
// Build profiles
|
||||
for name, strategy := range deschedulerPolicy.Strategies {
|
||||
if _, ok := pluginregistry.PluginRegistry[string(name)]; ok {
|
||||
if strategy.Enabled {
|
||||
params := strategy.Params
|
||||
if params == nil {
|
||||
params = &StrategyParameters{}
|
||||
}
|
||||
|
||||
nodeFit := false
|
||||
if name != "PodLifeTime" {
|
||||
nodeFit = params.NodeFit
|
||||
}
|
||||
|
||||
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
|
||||
klog.ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
|
||||
return fmt.Errorf("priority threshold misconfigured for plugin %v", name)
|
||||
}
|
||||
|
||||
var priorityThreshold *api.PriorityThreshold
|
||||
if strategy.Params != nil {
|
||||
priorityThreshold = &api.PriorityThreshold{
|
||||
Value: strategy.Params.ThresholdPriority,
|
||||
Name: strategy.Params.ThresholdPriorityClassName,
|
||||
}
|
||||
}
|
||||
|
||||
var pluginConfig *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[string(name)]; exists {
|
||||
pluginConfig, err = pcFnc(params)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "skipping strategy", "strategy", name)
|
||||
return fmt.Errorf("failed to get plugin config for strategy %v: %v", name, err)
|
||||
}
|
||||
} else {
|
||||
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
|
||||
return fmt.Errorf("unknown strategy name: %v", name)
|
||||
}
|
||||
|
||||
profile := api.DeschedulerProfile{
|
||||
Name: fmt.Sprintf("strategy-%v-profile", name),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: evictLocalStoragePods,
|
||||
EvictSystemCriticalPods: evictSystemCriticalPods,
|
||||
IgnorePvcPods: ignorePvcPods,
|
||||
EvictFailedBarePods: evictBarePods,
|
||||
NodeFit: nodeFit,
|
||||
PriorityThreshold: priorityThreshold,
|
||||
},
|
||||
},
|
||||
*pluginConfig,
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pluginArgs := registry[string(name)].PluginArgInstance
|
||||
pluginInstance, err := registry[string(name)].PluginBuilder(pluginArgs, &handleImpl{})
|
||||
if err != nil {
|
||||
klog.ErrorS(fmt.Errorf("could not build plugin"), "plugin build error", "plugin", name)
|
||||
return fmt.Errorf("could not build plugin: %v", name)
|
||||
}
|
||||
|
||||
// pluginInstance can be of any of each type, or both
|
||||
profilePlugins := profile.Plugins
|
||||
profile.Plugins = enableProfilePluginsByType(profilePlugins, pluginInstance, pluginConfig)
|
||||
profiles = append(profiles, profile)
|
||||
}
|
||||
} else {
|
||||
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
|
||||
return fmt.Errorf("unknown strategy name: %v", name)
|
||||
}
|
||||
}
|
||||
|
||||
out.Profiles = profiles
|
||||
out.NodeSelector = deschedulerPolicy.NodeSelector
|
||||
out.MaxNoOfPodsToEvictPerNamespace = deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace
|
||||
out.MaxNoOfPodsToEvictPerNode = deschedulerPolicy.MaxNoOfPodsToEvictPerNode
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
|
||||
profilePlugins = checkBalance(profilePlugins, pluginInstance, pluginConfig)
|
||||
profilePlugins = checkDeschedule(profilePlugins, pluginInstance, pluginConfig)
|
||||
return profilePlugins
|
||||
}
|
||||
|
||||
func checkBalance(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
|
||||
_, ok := pluginInstance.(frameworktypes.BalancePlugin)
|
||||
if ok {
|
||||
klog.V(3).Infof("converting Balance plugin: %s", pluginInstance.Name())
|
||||
profilePlugins.Balance.Enabled = []string{pluginConfig.Name}
|
||||
}
|
||||
return profilePlugins
|
||||
}
|
||||
|
||||
func checkDeschedule(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
|
||||
_, ok := pluginInstance.(frameworktypes.DeschedulePlugin)
|
||||
if ok {
|
||||
klog.V(3).Infof("converting Deschedule plugin: %s", pluginInstance.Name())
|
||||
profilePlugins.Deschedule.Enabled = []string{pluginConfig.Name}
|
||||
}
|
||||
return profilePlugins
|
||||
}
|
||||
|
||||
// Register Conversions
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,256 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	utilpointer "k8s.io/utils/pointer"
	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
	"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)

// Once all strategies are migrated the arguments get read from the configuration file
// without any wiring. Keeping the wiring here so the descheduler can still use
// the v1alpha1 configuration during the strategy migration to plugins.

var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*api.PluginConfig, error){
	"RemovePodsViolatingNodeTaints": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
			Namespaces:              v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:           params.LabelSelector,
			IncludePreferNoSchedule: params.IncludePreferNoSchedule,
			ExcludedTaints:          params.ExcludedTaints,
		}
		if err := removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodetaints.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodetaints.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatingnodetaints.PluginName,
			Args: args,
		}, nil
	},
	"RemoveFailedPods": func(params *StrategyParameters) (*api.PluginConfig, error) {
		failedPodsParams := params.FailedPods
		if failedPodsParams == nil {
			failedPodsParams = &FailedPods{}
		}
		args := &removefailedpods.RemoveFailedPodsArgs{
			Namespaces:              v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:           params.LabelSelector,
			IncludingInitContainers: failedPodsParams.IncludingInitContainers,
			MinPodLifetimeSeconds:   failedPodsParams.MinPodLifetimeSeconds,
			ExcludeOwnerKinds:       failedPodsParams.ExcludeOwnerKinds,
			Reasons:                 failedPodsParams.Reasons,
		}
		if err := removefailedpods.ValidateRemoveFailedPodsArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removefailedpods.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removefailedpods.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removefailedpods.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsViolatingNodeAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
			Namespaces:       v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:    params.LabelSelector,
			NodeAffinityType: params.NodeAffinityType,
		}
		if err := removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodeaffinity.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatingnodeaffinity.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsViolatingInterPodAntiAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
			Namespaces:    v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector: params.LabelSelector,
		}
		if err := removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatinginterpodantiaffinity.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatinginterpodantiaffinity.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsHavingTooManyRestarts": func(params *StrategyParameters) (*api.PluginConfig, error) {
		tooManyRestartsParams := params.PodsHavingTooManyRestarts
		if tooManyRestartsParams == nil {
			tooManyRestartsParams = &PodsHavingTooManyRestarts{}
		}
		args := &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
			Namespaces:              v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:           params.LabelSelector,
			PodRestartThreshold:     tooManyRestartsParams.PodRestartThreshold,
			IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
		}
		if err := removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodshavingtoomanyrestarts.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodshavingtoomanyrestarts.PluginName,
			Args: args,
		}, nil
	},
	"PodLifeTime": func(params *StrategyParameters) (*api.PluginConfig, error) {
		podLifeTimeParams := params.PodLifeTime
		if podLifeTimeParams == nil {
			podLifeTimeParams = &PodLifeTime{}
		}

		var states []string
		if podLifeTimeParams.PodStatusPhases != nil {
			states = append(states, podLifeTimeParams.PodStatusPhases...)
		}
		if podLifeTimeParams.States != nil {
			states = append(states, podLifeTimeParams.States...)
		}

		args := &podlifetime.PodLifeTimeArgs{
			Namespaces:            v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:         params.LabelSelector,
			MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
			States:                states,
		}
		if err := podlifetime.ValidatePodLifeTimeArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", podlifetime.PluginName, err)
		}
		return &api.PluginConfig{
			Name: podlifetime.PluginName,
			Args: args,
		}, nil
	},
	"RemoveDuplicates": func(params *StrategyParameters) (*api.PluginConfig, error) {
		args := &removeduplicates.RemoveDuplicatesArgs{
			Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
		}
		if params.RemoveDuplicates != nil {
			args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
		}
		if err := removeduplicates.ValidateRemoveDuplicatesArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removeduplicates.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removeduplicates.PluginName,
			Args: args,
		}, nil
	},
	"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
		constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
		if params.IncludeSoftConstraints {
			constraints = append(constraints, v1.ScheduleAnyway)
		}
		args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
			Namespaces:             v1alpha1NamespacesToInternal(params.Namespaces),
			LabelSelector:          params.LabelSelector,
			Constraints:            constraints,
			TopologyBalanceNodeFit: utilpointer.Bool(true),
		}
		if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingtopologyspreadconstraint.PluginName, err)
		}
		return &api.PluginConfig{
			Name: removepodsviolatingtopologyspreadconstraint.PluginName,
			Args: args,
		}, nil
	},
	"HighNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
		if params.NodeResourceUtilizationThresholds == nil {
			params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
		}
		args := &nodeutilization.HighNodeUtilizationArgs{
			EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
			Thresholds:          v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
			NumberOfNodes:       params.NodeResourceUtilizationThresholds.NumberOfNodes,
		}
		if err := nodeutilization.ValidateHighNodeUtilizationArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.HighNodeUtilizationPluginName, err)
		}
		return &api.PluginConfig{
			Name: nodeutilization.HighNodeUtilizationPluginName,
			Args: args,
		}, nil
	},
	"LowNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
		if params.NodeResourceUtilizationThresholds == nil {
			params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
		}
		args := &nodeutilization.LowNodeUtilizationArgs{
			EvictableNamespaces:    v1alpha1NamespacesToInternal(params.Namespaces),
			Thresholds:             v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
			TargetThresholds:       v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.TargetThresholds),
			UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
			NumberOfNodes:          params.NodeResourceUtilizationThresholds.NumberOfNodes,
		}

		if err := nodeutilization.ValidateLowNodeUtilizationArgs(args); err != nil {
			klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
			return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.LowNodeUtilizationPluginName, err)
		}
		return &api.PluginConfig{
			Name: nodeutilization.LowNodeUtilizationPluginName,
			Args: args,
		}, nil
	},
}

func v1alpha1NamespacesToInternal(namespaces *Namespaces) *api.Namespaces {
	internal := &api.Namespaces{}
	if namespaces != nil {
		if namespaces.Exclude != nil {
			internal.Exclude = namespaces.Exclude
		}
		if namespaces.Include != nil {
			internal.Include = namespaces.Include
		}
	} else {
		internal = nil
	}
	return internal
}

func v1alpha1ThresholdToInternal(thresholds ResourceThresholds) api.ResourceThresholds {
	internal := make(api.ResourceThresholds, len(thresholds))
	for k, v := range thresholds {
		internal[k] = api.Percentage(float64(v))
	}
	return internal
}
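Because the map above is keyed by the legacy strategy name, translating one configured strategy is a lookup plus a call. The snippet below is a hedged illustration of that shape; the wrapper function pluginConfigForStrategy is hypothetical and not part of the package.

// Illustrative only: translate one legacy strategy entry into a PluginConfig.
func pluginConfigForStrategy(name string, params *StrategyParameters) (*api.PluginConfig, error) {
	pcFnc, exists := StrategyParamsToPluginArgs[name]
	if !exists {
		return nil, fmt.Errorf("unknown strategy name: %v", name)
	}
	if params == nil {
		params = &StrategyParameters{}
	}
	return pcFnc(params)
}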
@@ -1,859 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
|
||||
)
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeTaints(t *testing.T) {
|
||||
strategyName := "RemovePodsViolatingNodeTaints"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
ExcludedTaints: []string{
|
||||
"dedicated=special-user",
|
||||
"reserved",
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removepodsviolatingnodetaints.PluginName,
|
||||
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
|
||||
strategyName := "RemoveFailedPods"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
FailedPods: &FailedPods{
|
||||
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||
ExcludeOwnerKinds: []string{"Job"},
|
||||
Reasons: []string{"NodeAffinity"},
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removefailedpods.PluginName,
|
||||
Args: &removefailedpods.RemoveFailedPodsArgs{
|
||||
ExcludeOwnerKinds: []string{"Job"},
|
||||
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||
Reasons: []string{"NodeAffinity"},
|
||||
IncludingInitContainers: true,
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
strategyName := "RemovePodsViolatingNodeAffinity"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removepodsviolatingnodeaffinity.PluginName,
|
||||
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params, not setting nodeaffinity type",
|
||||
params: &StrategyParameters{},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: nodeAffinityType needs to be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemovePodsViolatingInterPodAntiAffinity(t *testing.T) {
|
||||
strategyName := "RemovePodsViolatingInterPodAntiAffinity"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removepodsviolatinginterpodantiaffinity.PluginName,
|
||||
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
strategyName := "RemovePodsHavingTooManyRestarts"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
|
||||
PodRestartThreshold: 100,
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 100,
|
||||
IncludingInitContainers: true,
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params restart threshold",
|
||||
params: &StrategyParameters{
|
||||
PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
|
||||
PodRestartThreshold: 0,
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: invalid PodsHavingTooManyRestarts threshold", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
|
||||
strategyName := "PodLifeTime"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
PodLifeTime: &PodLifeTime{
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||
States: []string{
|
||||
"Pending",
|
||||
"PodInitializing",
|
||||
},
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: podlifetime.PluginName,
|
||||
Args: &podlifetime.PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||
States: []string{
|
||||
"Pending",
|
||||
"PodInitializing",
|
||||
},
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
PodLifeTime: &PodLifeTime{
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||
},
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params MaxPodLifeTimeSeconds not set",
|
||||
params: &StrategyParameters{
|
||||
PodLifeTime: &PodLifeTime{},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: MaxPodLifeTimeSeconds not set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
|
||||
strategyName := "RemoveDuplicates"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
RemoveDuplicates: &RemoveDuplicates{
|
||||
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removeduplicates.PluginName,
|
||||
Args: &removeduplicates.RemoveDuplicatesArgs{
|
||||
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
PodLifeTime: &PodLifeTime{
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
|
||||
},
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t *testing.T) {
|
||||
strategyName := "RemovePodsViolatingTopologySpreadConstraint"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
IncludeSoftConstraints: true,
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "params without soft constraints",
|
||||
params: &StrategyParameters{
|
||||
IncludeSoftConstraints: false,
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsHighNodeUtilization(t *testing.T) {
|
||||
strategyName := "HighNodeUtilization"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: ResourceThresholds{
|
||||
"cpu": Percentage(20),
|
||||
"memory": Percentage(20),
|
||||
"pods": Percentage(20),
|
||||
},
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(20),
|
||||
"memory": api.Percentage(20),
|
||||
"pods": api.Percentage(20),
|
||||
},
|
||||
NumberOfNodes: 3,
|
||||
EvictableNamespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: ResourceThresholds{
|
||||
"cpu": Percentage(20),
|
||||
"memory": Percentage(20),
|
||||
"pods": Percentage(20),
|
||||
},
|
||||
},
|
||||
Namespaces: &Namespaces{
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params nil ResourceThresholds",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: no resource threshold is configured", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params out of bounds threshold",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: ResourceThresholds{
|
||||
"cpu": Percentage(150),
|
||||
},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: cpu threshold not in [0, 100] range", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrategyParamsToPluginArgsLowNodeUtilization(t *testing.T) {
|
||||
strategyName := "LowNodeUtilization"
|
||||
type testCase struct {
|
||||
description string
|
||||
params *StrategyParameters
|
||||
err error
|
||||
result *api.PluginConfig
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "wire in all valid parameters",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: ResourceThresholds{
|
||||
"cpu": Percentage(20),
|
||||
"memory": Percentage(20),
|
||||
"pods": Percentage(20),
|
||||
},
|
||||
TargetThresholds: ResourceThresholds{
|
||||
"cpu": Percentage(50),
|
||||
"memory": Percentage(50),
|
||||
"pods": Percentage(50),
|
||||
},
|
||||
UseDeviationThresholds: true,
|
||||
},
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
Namespaces: &Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
result: &api.PluginConfig{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(20),
|
||||
"memory": api.Percentage(20),
|
||||
"pods": api.Percentage(20),
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(50),
|
||||
"memory": api.Percentage(50),
|
||||
"pods": api.Percentage(50),
|
||||
},
|
||||
UseDeviationThresholds: true,
|
||||
NumberOfNodes: 3,
|
||||
EvictableNamespaces: &api.Namespaces{
|
||||
Exclude: []string{"test1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid params namespaces",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: ResourceThresholds{
|
||||
"cpu": Percentage(20),
|
||||
"memory": Percentage(20),
|
||||
"pods": Percentage(20),
|
||||
},
|
||||
TargetThresholds: ResourceThresholds{
|
||||
"cpu": Percentage(50),
|
||||
"memory": Percentage(50),
|
||||
"pods": Percentage(50),
|
||||
},
|
||||
UseDeviationThresholds: true,
|
||||
},
|
||||
Namespaces: &Namespaces{
|
||||
Include: []string{"test2"},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params nil ResourceThresholds",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: no resource threshold is configured", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
{
|
||||
description: "invalid params out of bounds threshold",
|
||||
params: &StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: ResourceThresholds{
|
||||
"cpu": Percentage(150),
|
||||
},
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: cpu threshold not in [0, 100] range", strategyName),
|
||||
result: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
var result *api.PluginConfig
|
||||
var err error
|
||||
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
|
||||
result, err = pcFnc(tc.params)
|
||||
}
|
||||
if err != nil {
|
||||
if err.Error() != tc.err.Error() {
|
||||
t.Errorf("unexpected error: %s", err.Error())
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
// sort to easily compare deepequality
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,129 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

type DeschedulerPolicy struct {
	metav1.TypeMeta `json:",inline"`

	// Strategies
	Strategies StrategyList `json:"strategies,omitempty"`

	// NodeSelector for a set of nodes to operate over
	NodeSelector *string `json:"nodeSelector,omitempty"`

	// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
	EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`

	// EvictLocalStoragePods allows pods using local storage to be evicted.
	EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`

	// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
	EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`

	// IgnorePVCPods prevents pods with PVCs from being evicted.
	IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`

	// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
	MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`

	// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
	MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}

type (
	StrategyName string
	StrategyList map[StrategyName]DeschedulerStrategy
)

type DeschedulerStrategy struct {
	// Enabled or disabled
	Enabled bool `json:"enabled,omitempty"`

	// Weight
	Weight int `json:"weight,omitempty"`

	// Strategy parameters
	Params *StrategyParameters `json:"params,omitempty"`
}

// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable.
type Namespaces struct {
	Include []string `json:"include"`
	Exclude []string `json:"exclude"`
}

// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
type StrategyParameters struct {
	NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
	NodeAffinityType                  []string                           `json:"nodeAffinityType,omitempty"`
	PodsHavingTooManyRestarts         *PodsHavingTooManyRestarts         `json:"podsHavingTooManyRestarts,omitempty"`
	PodLifeTime                       *PodLifeTime                       `json:"podLifeTime,omitempty"`
	RemoveDuplicates                  *RemoveDuplicates                  `json:"removeDuplicates,omitempty"`
	FailedPods                        *FailedPods                        `json:"failedPods,omitempty"`
	IncludeSoftConstraints            bool                               `json:"includeSoftConstraints"`
	Namespaces                        *Namespaces                        `json:"namespaces"`
	ThresholdPriority                 *int32                             `json:"thresholdPriority"`
	ThresholdPriorityClassName        string                             `json:"thresholdPriorityClassName"`
	LabelSelector                     *metav1.LabelSelector              `json:"labelSelector"`
	NodeFit                           bool                               `json:"nodeFit"`
	IncludePreferNoSchedule           bool                               `json:"includePreferNoSchedule"`
	ExcludedTaints                    []string                           `json:"excludedTaints,omitempty"`
}

type (
	Percentage         float64
	ResourceThresholds map[v1.ResourceName]Percentage
)

type NodeResourceUtilizationThresholds struct {
	UseDeviationThresholds bool               `json:"useDeviationThresholds,omitempty"`
	Thresholds             ResourceThresholds `json:"thresholds,omitempty"`
	TargetThresholds       ResourceThresholds `json:"targetThresholds,omitempty"`
	NumberOfNodes          int                `json:"numberOfNodes,omitempty"`
}

type PodsHavingTooManyRestarts struct {
	PodRestartThreshold     int32 `json:"podRestartThreshold,omitempty"`
	IncludingInitContainers bool  `json:"includingInitContainers,omitempty"`
}

type RemoveDuplicates struct {
	ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}

type PodLifeTime struct {
	MaxPodLifeTimeSeconds *uint    `json:"maxPodLifeTimeSeconds,omitempty"`
	States                []string `json:"states,omitempty"`

	// Deprecated: Use States instead.
	PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}

type FailedPods struct {
	ExcludeOwnerKinds       []string `json:"excludeOwnerKinds,omitempty"`
	MinPodLifetimeSeconds   *uint    `json:"minPodLifetimeSeconds,omitempty"`
	Reasons                 []string `json:"reasons,omitempty"`
	IncludingInitContainers bool     `json:"includingInitContainers,omitempty"`
}
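As a rough illustration of how these v1alpha1 types compose, the sketch below builds an in-memory policy with a single strategy; the function name and all values are invented for the example.

// buildExamplePolicy is illustrative only; the values are invented.
func buildExamplePolicy() DeschedulerPolicy {
	maxLifetime := uint(86400)
	return DeschedulerPolicy{
		Strategies: StrategyList{
			"PodLifeTime": {
				Enabled: true,
				Params: &StrategyParameters{
					PodLifeTime: &PodLifeTime{
						MaxPodLifeTimeSeconds: &maxLifetime,
						States:                []string{"Pending"},
					},
					Namespaces: &Namespaces{Exclude: []string{"kube-system"}},
				},
			},
		},
	}
}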
pkg/api/v1alpha1/zz_generated.deepcopy.go (generated, 380 lines)
@@ -1,380 +0,0 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictFailedBarePods != nil {
|
||||
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictLocalStoragePods != nil {
|
||||
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictSystemCriticalPods != nil {
|
||||
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.IgnorePVCPods != nil {
|
||||
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNamespace != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||
*out = *in
|
||||
if in.Params != nil {
|
||||
in, out := &in.Params, &out.Params
|
||||
*out = new(StrategyParameters)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MinPodLifetimeSeconds != nil {
|
||||
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.Reasons != nil {
|
||||
in, out := &in.Reasons, &out.Reasons
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
|
||||
func (in *FailedPods) DeepCopy() *FailedPods {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FailedPods)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||
*out = *in
|
||||
if in.Include != nil {
|
||||
in, out := &in.Include, &out.Include
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Exclude != nil {
|
||||
in, out := &in.Exclude, &out.Exclude
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
|
||||
func (in *Namespaces) DeepCopy() *Namespaces {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Namespaces)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeResourceUtilizationThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
|
||||
*out = *in
|
||||
if in.MaxPodLifeTimeSeconds != nil {
|
||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.States != nil {
|
||||
in, out := &in.States, &out.States
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodStatusPhases != nil {
|
||||
in, out := &in.PodStatusPhases, &out.PodStatusPhases
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
|
||||
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
|
||||
if in == nil {
|
||||
return nil
}
out := new(PodLifeTime)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
if in == nil {
return nil
}
out := new(PodsHavingTooManyRestarts)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
if in == nil {
return nil
}
out := new(RemoveDuplicates)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
in := &in
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
if in == nil {
return nil
}
out := new(ResourceThresholds)
in.DeepCopyInto(out)
return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
{
in := &in
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
return
}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
if in == nil {
return nil
}
out := new(StrategyList)
in.DeepCopyInto(out)
return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
if in.NodeResourceUtilizationThresholds != nil {
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
*out = new(NodeResourceUtilizationThresholds)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodsHavingTooManyRestarts != nil {
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
*out = new(PodsHavingTooManyRestarts)
**out = **in
}
if in.PodLifeTime != nil {
in, out := &in.PodLifeTime, &out.PodLifeTime
*out = new(PodLifeTime)
(*in).DeepCopyInto(*out)
}
if in.RemoveDuplicates != nil {
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ThresholdPriority != nil {
in, out := &in.ThresholdPriority, &out.ThresholdPriority
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
if in == nil {
return nil
}
out := new(StrategyParameters)
in.DeepCopyInto(out)
return out
}
@@ -37,6 +37,9 @@ type DeschedulerPolicy struct {

// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`

// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`
}

type DeschedulerProfile struct {
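As a quick illustration of the new cycle-wide cap next to the existing per-node limit, here is a minimal sketch (not part of the diff; the chosen limits are arbitrary, and utilptr is k8s.io/utils/ptr as used in the tests further down):

package main

import (
	"fmt"

	utilptr "k8s.io/utils/ptr"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	// Hypothetical policy: at most 3 evictions per node, and never more
	// than 10 evictions in a single descheduling cycle.
	policy := &api.DeschedulerPolicy{
		MaxNoOfPodsToEvictPerNode: utilptr.To[uint](3),
		MaxNoOfPodsToEvictTotal:   utilptr.To[uint](10),
	}
	fmt.Println("total eviction limit:", *policy.MaxNoOfPodsToEvictTotal)
}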
2 pkg/api/v1alpha2/zz_generated.conversion.go generated
@@ -104,6 +104,7 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
return nil
}

@@ -122,6 +123,7 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
return nil
}
5 pkg/api/v1alpha2/zz_generated.deepcopy.go generated
@@ -51,6 +51,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
return
}
5 pkg/api/zz_generated.deepcopy.go generated
@@ -51,6 +51,11 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
return
}
@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool

// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods bool

// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool

@@ -51,6 +51,9 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`

// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`

// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
@@ -67,6 +67,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.EvictDaemonSetPods = in.EvictDaemonSetPods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err

@@ -89,6 +90,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.EvictDaemonSetPods = in.EvictDaemonSetPods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
@@ -67,11 +67,11 @@ func GetMasterFromKubeconfig(filename string) (string, error) {

context, ok := config.Contexts[config.CurrentContext]
if !ok {
return "", fmt.Errorf("failed to get master address from kubeconfig")
return "", fmt.Errorf("failed to get master address from kubeconfig: current context not found")
}

if val, ok := config.Clusters[context.Cluster]; ok {
return val.Server, nil
}
return "", fmt.Errorf("failed to get master address from kubeconfig")
return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
}
@@ -18,13 +18,14 @@ package descheduler

import (
"context"
"errors"
"fmt"
"math"
"strconv"
"time"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/events"

@@ -69,19 +70,20 @@ type profileRunner struct {
}

type descheduler struct {
rs *options.DeschedulerServer
podLister listersv1.PodLister
nodeLister listersv1.NodeLister
namespaceLister listersv1.NamespaceLister
priorityClassLister schedulingv1.PriorityClassLister
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictionPolicyGroupVersion string
deschedulerPolicy *api.DeschedulerPolicy
eventRecorder events.EventRecorder
rs *options.DeschedulerServer
podLister listersv1.PodLister
nodeLister listersv1.NodeLister
namespaceLister listersv1.NamespaceLister
priorityClassLister schedulingv1.PriorityClassLister
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
deschedulerPolicy *api.DeschedulerPolicy
eventRecorder events.EventRecorder
podEvictor *evictions.PodEvictor
podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
}

func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()

@@ -93,17 +95,30 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}

podEvictor := evictions.NewPodEvictor(
nil,
eventRecorder,
evictions.NewOptions().
WithPolicyGroupVersion(evictionPolicyGroupVersion).
WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
WithDryRun(rs.DryRun).
WithMetricsEnabled(!rs.DisableMetrics),
)

return &descheduler{
rs: rs,
podLister: podLister,
nodeLister: nodeLister,
namespaceLister: namespaceLister,
priorityClassLister: priorityClassLister,
getPodsAssignedToNode: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
evictionPolicyGroupVersion: evictionPolicyGroupVersion,
deschedulerPolicy: deschedulerPolicy,
eventRecorder: eventRecorder,
rs: rs,
podLister: podLister,
nodeLister: nodeLister,
namespaceLister: namespaceLister,
priorityClassLister: priorityClassLister,
getPodsAssignedToNode: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
deschedulerPolicy: deschedulerPolicy,
eventRecorder: eventRecorder,
podEvictor: podEvictor,
podEvictionReactionFnc: podEvictionReactionFnc,
}, nil
}

@@ -128,7 +143,10 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
if d.rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
err := cachedClient(d.rs.Client, fakeClient, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
if err != nil {
return err
}

@@ -152,21 +170,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
client = d.rs.Client
}

klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
client,
d.evictionPolicyGroupVersion,
d.rs.DryRun,
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!d.rs.DisableMetrics,
d.eventRecorder,
)
klog.V(3).Infof("Setting up the pod evictor")
d.podEvictor.SetClient(client)
d.podEvictor.ResetCounters()

d.runProfiles(ctx, client, nodes, podEvictor)
d.runProfiles(ctx, client, nodes)

klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", d.podEvictor.TotalEvicted())

return nil
}

@@ -174,7 +184,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()

@@ -185,7 +195,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
frameworkprofile.WithPodEvictor(podEvictor),
frameworkprofile.WithPodEvictor(d.podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
)
if err != nil {

@@ -275,46 +285,38 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return runFn()
}

func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
serverVersionInfo, err := discovery.ServerVersion()
func validateVersionCompatibility(discovery discovery.DiscoveryInterface, deschedulerVersionInfo version.Info) error {
kubeServerVersionInfo, err := discovery.ServerVersion()
if err != nil {
return errors.New("failed to discover Kubernetes server version")
return fmt.Errorf("failed to discover Kubernetes server version: %v", err)
}

serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
kubeServerVersion, err := utilversion.ParseSemantic(kubeServerVersionInfo.String())
if err != nil {
return errors.New("failed to parse Kubernetes server version")
return fmt.Errorf("failed to parse Kubernetes server version '%s': %v", kubeServerVersionInfo.String(), err)
}

deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
deschedulerMinor, err := strconv.ParseFloat(deschedulerVersionInfo.Minor, 64)
if err != nil {
return errors.New("failed to convert Descheduler minor version to float")
return fmt.Errorf("failed to convert Descheduler minor version '%s' to float: %v", deschedulerVersionInfo.Minor, err)
}

deschedulerMinor := float64(deschedulerVersion.Minor())
serverMinor := float64(serverVersion.Minor())
if math.Abs(deschedulerMinor-serverMinor) > 3 {
kubeServerMinor := float64(kubeServerVersion.Minor())
if math.Abs(deschedulerMinor-kubeServerMinor) > 3 {
return fmt.Errorf(
"descheduler version %v may not be supported on your version of Kubernetes %v."+
"descheduler version %s.%s may not be supported on your version of Kubernetes %v."+
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
deschedulerVersion.String(),
serverVersionInfo.String(),
deschedulerVersionInfo.Major,
deschedulerVersionInfo.Minor,
kubeServerVersionInfo.String(),
)
}

return nil
}

func cachedClient(
realClient clientset.Interface,
podLister listersv1.PodLister,
nodeLister listersv1.NodeLister,
namespaceLister listersv1.NamespaceLister,
priorityClassLister schedulingv1.PriorityClassLister,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {

@@ -331,62 +333,71 @@ func cachedClient(
}
// fallback to the default reactor
return false, nil, nil
})
}
}

func cachedClient(
realClient clientset.Interface,
fakeClient *fakeclientset.Clientset,
podLister listersv1.PodLister,
nodeLister listersv1.NodeLister,
namespaceLister listersv1.NamespaceLister,
priorityClassLister schedulingv1.PriorityClassLister,
) error {
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
return fmt.Errorf("unable to list pods: %v", err)
}

for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
return fmt.Errorf("unable to copy pod: %v", err)
}
}

nodes, err := nodeLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
return fmt.Errorf("unable to list nodes: %v", err)
}

for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
return fmt.Errorf("unable to copy node: %v", err)
}
}

namespaces, err := namespaceLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
return fmt.Errorf("unable to list namespaces: %v", err)
}

for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy namespace: %v", err)
return fmt.Errorf("unable to copy namespace: %v", err)
}
}

priorityClasses, err := priorityClassLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
return fmt.Errorf("unable to list priorityclasses: %v", err)
}

for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
return fmt.Errorf("unable to copy priorityclass: %v", err)
}
}

return fakeClient, nil
return nil
}

func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
defer span.End()
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()

sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))

var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {

@@ -402,7 +413,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()

descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
descheduler, err := newDescheduler(rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err

@@ -417,7 +428,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
// A next context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.nodeLister, nodeSelector)
if err != nil {
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)

@@ -462,3 +473,10 @@ func createClients(clientConnection componentbaseconfig.ClientConnectionConfigur

return kClient, eventClient, nil
}

func trimManagedFields(obj interface{}) (interface{}, error) {
if accessor, err := meta.Accessor(obj); err == nil {
accessor.SetManagedFields(nil)
}
return obj, nil
}
@@ -9,62 +9,134 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
apiversion "k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/utils"
deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/test"
)

// scope contains information about an ongoing conversion.
type scope struct {
converter *conversion.Converter
meta *conversion.Meta
func initPluginRegistry() {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
}

// Convert continues a conversion.
func (s scope) Convert(src, dest interface{}) error {
return s.converter.Convert(src, dest, s.meta)
func removePodsViolatingNodeTaintsPolicy() *api.DeschedulerPolicy {
return &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "Profile",
PluginConfigs: []api.PluginConfig{
{
Name: "RemovePodsViolatingNodeTaints",
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
},
{
Name: "DefaultEvictor",
Args: &defaultevictor.DefaultEvictorArgs{},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{
"DefaultEvictor",
},
},
Deschedule: api.PluginSet{
Enabled: []string{
"RemovePodsViolatingNodeTaints",
},
},
},
},
},
}
}

// Meta returns the meta object that was originally passed to Convert.
func (s scope) Meta() *conversion.Meta {
return s.meta
func removeDuplicatesPolicy() *api.DeschedulerPolicy {
return &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
Name: "Profile",
PluginConfigs: []api.PluginConfig{
{
Name: "RemoveDuplicates",
Args: &removeduplicates.RemoveDuplicatesArgs{},
},
{
Name: "DefaultEvictor",
Args: &defaultevictor.DefaultEvictorArgs{},
},
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{
"DefaultEvictor",
},
},
Balance: api.PluginSet{
Enabled: []string{
"RemoveDuplicates",
},
},
},
},
},
}
}

func initDescheduler(t *testing.T, ctx context.Context, internalDeschedulerPolicy *api.DeschedulerPolicy, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
client := fakeclientset.NewSimpleClientset(objects...)
eventClient := fakeclientset.NewSimpleClientset(objects...)

rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient

sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)

descheduler, err := newDescheduler(rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
if err != nil {
eventBroadcaster.Shutdown()
t.Fatalf("Unable to create a descheduler instance: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

return rs, descheduler, client
}

func TestTaintsUpdated(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
initPluginRegistry()

ctx := context.Background()
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)

p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{},
}
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()

client := fakeclientset.NewSimpleClientset(n1, n2, p1)
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
dp := &v1alpha1.DeschedulerPolicy{
Strategies: v1alpha1.StrategyList{
"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{
Enabled: true,
},
},
}

rs, err := options.NewDeschedulerServer()
if err != nil {

@@ -95,16 +167,9 @@ func TestTaintsUpdated(t *testing.T) {
}

var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))

internalDeschedulerPolicy := &api.DeschedulerPolicy{}
scope := scope{}
err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
if err != nil {
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
}

if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}

@@ -114,9 +179,7 @@ func TestTaintsUpdated(t *testing.T) {
}

func TestDuplicate(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
initPluginRegistry()

ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)

@@ -136,13 +199,6 @@ func TestDuplicate(t *testing.T) {

client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
dp := &v1alpha1.DeschedulerPolicy{
Strategies: v1alpha1.StrategyList{
"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
Enabled: true,
},
},
}

rs, err := options.NewDeschedulerServer()
if err != nil {

@@ -161,20 +217,14 @@ func TestDuplicate(t *testing.T) {
}

var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))

internalDeschedulerPolicy := &api.DeschedulerPolicy{}
scope := scope{}
err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
if err != nil {
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
}
if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}

if len(evictedPods) == 0 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
t.Fatalf("Unable to evict pods\n")
}
}

@@ -251,44 +301,44 @@ func TestRootCancelWithNoInterval(t *testing.T) {
func TestValidateVersionCompatibility(t *testing.T) {
type testCase struct {
name string
deschedulerVersion string
deschedulerVersion deschedulerversion.Info
serverVersion string
expectError bool
}
testCases := []testCase{
{
name: "no error when descheduler minor equals to server minor",
deschedulerVersion: "v0.26",
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "no error when descheduler minor is 3 behind server minor",
deschedulerVersion: "0.23",
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "23"},
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "no error when descheduler minor is 3 ahead of server minor",
deschedulerVersion: "v0.26",
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "error when descheduler minor is 4 behind server minor",
deschedulerVersion: "v0.22",
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "22"},
serverVersion: "v1.26.1",
expectError: true,
},
{
name: "error when descheduler minor is 4 ahead of server minor",
deschedulerVersion: "v0.27",
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "27"},
serverVersion: "v1.23.1",
expectError: true,
},
{
name: "no error when using managed provider version",
deschedulerVersion: "v0.25",
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "25"},
serverVersion: "v1.25.12-eks-2d98532",
expectError: false,
},

@@ -298,8 +348,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
err := validateVersionCompatibility(fakeDiscovery, tc.deschedulerVersion)

hasError := err != nil
if tc.expectError != hasError {

@@ -309,7 +358,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
}
}

func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)

@@ -323,3 +372,170 @@ func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (boo
return false, nil, nil // fallback to the default reactor
}
}

func taintNodeNoSchedule(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
{
Key: "key",
Value: "value",
Effect: v1.TaintEffectNoSchedule,
},
}
}

func TestPodEvictorReset(t *testing.T) {
initPluginRegistry()

ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}

ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
}

p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)

internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx)
rs, descheduler, client := initDescheduler(t, ctxCancel, internalDeschedulerPolicy, node1, node2, p1, p2)
defer cancel()

var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))

var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods)
}

// a single pod eviction expected
klog.Infof("2 pod eviction expected per a descheduling cycle, 2 real evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}

// a single pod eviction expected
klog.Infof("2 pod eviction expected per a descheduling cycle, 4 real evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}

// check the fake client syncing and the right pods evicted
klog.Infof("Enabling the dry run mode")
rs.DryRun = true
evictedPods = []string{}

klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}

klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
}
}

func TestDeschedulingLimits(t *testing.T) {
initPluginRegistry()

tests := []struct {
description string
policy *api.DeschedulerPolicy
limit uint
}{
{
description: "limits per node",
policy: func() *api.DeschedulerPolicy {
policy := removePodsViolatingNodeTaintsPolicy()
policy.MaxNoOfPodsToEvictPerNode = utilptr.To[uint](4)
return policy
}(),
limit: uint(4),
},
{
description: "limits per namespace",
policy: func() *api.DeschedulerPolicy {
policy := removePodsViolatingNodeTaintsPolicy()
policy.MaxNoOfPodsToEvictPerNamespace = utilptr.To[uint](4)
return policy
}(),
limit: uint(4),
},
{
description: "limits per cycle",
policy: func() *api.DeschedulerPolicy {
policy := removePodsViolatingNodeTaintsPolicy()
policy.MaxNoOfPodsToEvictTotal = utilptr.To[uint](4)
return policy
}(),
limit: uint(4),
},
}

ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
}

for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ctxCancel, cancel := context.WithCancel(ctx)
_, descheduler, client := initDescheduler(t, ctxCancel, tc.policy, node1, node2)
defer cancel()

pods := []*v1.Pod{
test.BuildTestPod("p1", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p2", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
}

for j := 0; j < 5; j++ {
idx := j
if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
t.Fatalf("unable to create a pod: %v", err)
}
defer func() {
if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
}()
}
time.Sleep(100 * time.Millisecond)

err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
totalEs := descheduler.podEvictor.TotalEvicted()
if totalEs > tc.limit {
t.Fatalf("Expected %v evictions in total, got %v instead", tc.limit, totalEs)
}
t.Logf("Total evictions: %v", totalEs)
})
}
}
45 pkg/descheduler/evictions/errors.go Normal file
@@ -0,0 +1,45 @@
package evictions

type EvictionNodeLimitError struct {
node string
}

func (e EvictionNodeLimitError) Error() string {
return "maximum number of evicted pods per node reached"
}

func NewEvictionNodeLimitError(node string) *EvictionNodeLimitError {
return &EvictionNodeLimitError{
node: node,
}
}

var _ error = &EvictionNodeLimitError{}

type EvictionNamespaceLimitError struct {
namespace string
}

func (e EvictionNamespaceLimitError) Error() string {
return "maximum number of evicted pods per namespace reached"
}

func NewEvictionNamespaceLimitError(namespace string) *EvictionNamespaceLimitError {
return &EvictionNamespaceLimitError{
namespace: namespace,
}
}

var _ error = &EvictionNamespaceLimitError{}

type EvictionTotalLimitError struct{}

func (e EvictionTotalLimitError) Error() string {
return "maximum number of evicted pods per a descheduling cycle reached"
}

func NewEvictionTotalLimitError() *EvictionTotalLimitError {
return &EvictionTotalLimitError{}
}

var _ error = &EvictionTotalLimitError{}
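Because these sentinel types all implement error, a caller that receives the result of PodEvictor.EvictPod can branch on the specific limit that was hit. A minimal sketch, not part of the diff (the handleEvictionError helper and the printed messages are invented for illustration; the tests further down use a plain type switch instead):

package main

import (
	"errors"
	"fmt"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// handleEvictionError shows how the typed limit errors defined above
// can be told apart with errors.As.
func handleEvictionError(err error) {
	var nodeLimit *evictions.EvictionNodeLimitError
	var nsLimit *evictions.EvictionNamespaceLimitError
	var totalLimit *evictions.EvictionTotalLimitError
	switch {
	case err == nil:
		fmt.Println("pod evicted")
	case errors.As(err, &totalLimit):
		fmt.Println("stop: total eviction limit for this cycle reached")
	case errors.As(err, &nodeLimit):
		fmt.Println("skip node: per-node eviction limit reached")
	case errors.As(err, &nsLimit):
		fmt.Println("skip namespace: per-namespace eviction limit reached")
	default:
		fmt.Println("eviction failed:", err)
	}
}

func main() {
	// Simulate a per-node limit being hit.
	handleEvictionError(evictions.NewEvictionNodeLimitError("n1"))
}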
@@ -19,6 +19,7 @@ package evictions
import (
"context"
"fmt"
"sync"

"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"

@@ -42,107 +43,120 @@ type (
)

type PodEvictor struct {
mu sync.Mutex
client clientset.Interface
nodes []*v1.Node
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
maxPodsToEvictTotal *uint
nodePodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
totalPodCount uint
metricsEnabled bool
eventRecorder events.EventRecorder
}

func NewPodEvictor(
client clientset.Interface,
policyGroupVersion string,
dryRun bool,
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
nodes []*v1.Node,
metricsEnabled bool,
eventRecorder events.EventRecorder,
options *Options,
) *PodEvictor {
nodePodCount := make(nodePodEvictedCount)
namespacePodCount := make(namespacePodEvictCount)
for _, node := range nodes {
// Initialize podsEvicted till now with 0.
nodePodCount[node.Name] = 0
if options == nil {
options = NewOptions()
}

return &PodEvictor{
client: client,
nodes: nodes,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
metricsEnabled: metricsEnabled,
eventRecorder: eventRecorder,
policyGroupVersion: options.policyGroupVersion,
dryRun: options.dryRun,
maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
maxPodsToEvictTotal: options.maxPodsToEvictTotal,
metricsEnabled: options.metricsEnabled,
nodePodCount: make(nodePodEvictedCount),
namespacePodCount: make(namespacePodEvictCount),
}
}

// NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
return pe.nodepodCount[node.Name]
pe.mu.Lock()
defer pe.mu.Unlock()
return pe.nodePodCount[node.Name]
}

// TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() uint {
var total uint
for _, count := range pe.nodepodCount {
total += count
}
return total
pe.mu.Lock()
defer pe.mu.Unlock()
return pe.totalPodCount
}

// NodeLimitExceeded checks if the number of evictions for a node was exceeded
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
if pe.maxPodsToEvictPerNode != nil {
return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
}
return false
func (pe *PodEvictor) ResetCounters() {
pe.mu.Lock()
defer pe.mu.Unlock()
pe.nodePodCount = make(nodePodEvictedCount)
pe.namespacePodCount = make(namespacePodEvictCount)
pe.totalPodCount = 0
}

func (pe *PodEvictor) SetClient(client clientset.Interface) {
pe.mu.Lock()
defer pe.mu.Unlock()
pe.client = client
}

// EvictOptions provides a handle for passing additional info to EvictPod
type EvictOptions struct {
// Reason allows for passing details about the specific eviction for logging.
Reason string
// ProfileName allows for passing details about profile for observability.
ProfileName string
// StrategyName allows for passing details about strategy for observability.
StrategyName string
}

// EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
pe.mu.Lock()
defer pe.mu.Unlock()
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
defer span.End()
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
strategy := ""
if ctx.Value("strategyName") != nil {
strategy = ctx.Value("strategyName").(string)

if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+1 > *pe.maxPodsToEvictTotal {
err := NewEvictionTotalLimitError()
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
return err
}

if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per node reached")))
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per node reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return false
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
return err
}
}

if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
err := NewEvictionNamespaceLimitError(pod.Namespace)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", "Maximum number of evicted pods per namespace reached")))
klog.ErrorS(fmt.Errorf("maximum number of evicted pods per namespace reached"), "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return false
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
return err
}

err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)

@@ -151,34 +165,35 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}
return false
return err
}

if pod.Spec.NodeName != "" {
pe.nodepodCount[pod.Spec.NodeName]++
pe.nodePodCount[pod.Spec.NodeName]++
}
pe.namespacePodCount[pod.Namespace]++
pe.totalPodCount++

if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
}

if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
} else {
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
reason := opts.Reason
if len(reason) == 0 {
reason = strategy
reason = opts.StrategyName
if len(reason) == 0 {
reason = "NotSet"
}
}
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
}
return true
return nil
}

func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
@@ -22,9 +22,12 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events"
utilptr "k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"

@@ -113,3 +116,52 @@ func TestPodTypes(t *testing.T) {
t.Errorf("Expected p1 to be a normal pod.")
}
}

func TestNewPodEvictor(t *testing.T) {
pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)

fakeClient := fake.NewSimpleClientset(pod1)

eventRecorder := &events.FakeRecorder{}

podEvictor := NewPodEvictor(
fakeClient,
eventRecorder,
NewOptions().WithMaxPodsToEvictPerNode(utilptr.To[uint](1)),
)

stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}

// 0 evictions expected
if evictions := podEvictor.NodeEvicted(stubNode); evictions != 0 {
t.Errorf("Expected 0 node evictions, got %q instead", evictions)
}
// 0 evictions expected
if evictions := podEvictor.TotalEvicted(); evictions != 0 {
t.Errorf("Expected 0 total evictions, got %q instead", evictions)
}

if err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{}); err != nil {
t.Errorf("Expected a pod eviction, got an eviction error instead: %v", err)
}

// 1 node eviction expected
if evictions := podEvictor.NodeEvicted(stubNode); evictions != 1 {
t.Errorf("Expected 1 node eviction, got %q instead", evictions)
}
// 1 total eviction expected
if evictions := podEvictor.TotalEvicted(); evictions != 1 {
t.Errorf("Expected 1 total evictions, got %q instead", evictions)
}

err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{})
if err == nil {
t.Errorf("Expected a pod eviction error, got nil instead")
}
switch err.(type) {
case *EvictionNodeLimitError:
// all good
default:
t.Errorf("Expected a pod eviction EvictionNodeLimitError error, got a different error instead: %v", err)
}
}
51
pkg/descheduler/evictions/options.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package evictions
|
||||
|
||||
import (
|
||||
policy "k8s.io/api/policy/v1"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
metricsEnabled bool
|
||||
}
|
||||
|
||||
// NewOptions returns an Options with default values.
|
||||
func NewOptions() *Options {
|
||||
return &Options{
|
||||
policyGroupVersion: policy.SchemeGroupVersion.String(),
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Options) WithPolicyGroupVersion(policyGroupVersion string) *Options {
|
||||
o.policyGroupVersion = policyGroupVersion
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithDryRun(dryRun bool) *Options {
|
||||
o.dryRun = dryRun
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMaxPodsToEvictPerNode(maxPodsToEvictPerNode *uint) *Options {
|
||||
o.maxPodsToEvictPerNode = maxPodsToEvictPerNode
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMaxPodsToEvictPerNamespace(maxPodsToEvictPerNamespace *uint) *Options {
|
||||
o.maxPodsToEvictPerNamespace = maxPodsToEvictPerNamespace
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
|
||||
o.maxPodsToEvictTotal = maxPodsToEvictTotal
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
|
||||
o.metricsEnabled = metricsEnabled
|
||||
return o
|
||||
}
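As a quick illustration of the new options builder, a hedged sketch of wiring it into NewPodEvictor; the argument order follows the TestNewPodEvictor call shown earlier, and client, eventRecorder and the limit values are placeholders.

// Sketch only: client and eventRecorder are assumed to already exist;
// the limits are arbitrary example values.
opts := NewOptions().
	WithDryRun(true).
	WithMaxPodsToEvictPerNode(utilptr.To[uint](5)).
	WithMaxPodsToEvictPerNamespace(utilptr.To[uint](10)).
	WithMaxPodsToEvictTotal(utilptr.To[uint](50)).
	WithMetricsEnabled(true)

podEvictor := NewPodEvictor(client, eventRecorder, opts)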
|
||||
@@ -18,20 +18,24 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
const workersCount = 100
|
||||
|
||||
// ReadyNodes returns ready nodes irrespective of whether they are
|
||||
// schedulable or not.
|
||||
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) {
|
||||
@@ -104,83 +108,96 @@ func IsReady(node *v1.Node) bool {
|
||||
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
|
||||
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
|
||||
// deciding if a pod would fit on a node, but more predicates may be added in the future.
|
||||
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
|
||||
// This function must not modify the given nodes or pods.
|
||||
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
|
||||
// Check node selector and required affinity
|
||||
var errors []error
|
||||
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
|
||||
errors = append(errors, err)
|
||||
return err
|
||||
} else if !ok {
|
||||
errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
|
||||
return errors.New("pod node selector does not match the node label")
|
||||
}
|
||||
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
if !ok {
|
||||
errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
|
||||
}
|
||||
// Check if the pod can fit on a node based on its requests
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqErrors := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
errors = append(errors, reqErrors...)
|
||||
}
|
||||
}
|
||||
// Check if node is schedulable
|
||||
if IsNodeUnschedulable(node) {
|
||||
errors = append(errors, fmt.Errorf("node is not schedulable"))
|
||||
return errors.New("pod does not tolerate taints on the node")
|
||||
}
|
||||
|
||||
return errors
|
||||
// Check if the pod can fit on a node based off it's requests
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
return reqError
|
||||
}
|
||||
}
|
||||
|
||||
// Check if node is schedulable
|
||||
if IsNodeUnschedulable(node) {
|
||||
return errors.New("node is not schedulable")
|
||||
}
|
||||
|
||||
// Check if pod matches inter-pod anti-affinity rule of pod on node
|
||||
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func podFitsNodes(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node, excludeFilter func(pod *v1.Pod, node *v1.Node) bool) bool {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var filteredLen int32
|
||||
checkNode := func(i int) {
|
||||
node := nodes[i]
|
||||
if excludeFilter != nil && excludeFilter(pod, node) {
|
||||
return
|
||||
}
|
||||
err := NodeFit(nodeIndexer, pod, node)
|
||||
if err == nil {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
atomic.AddInt32(&filteredLen, 1)
|
||||
cancel()
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "err", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Stop searching for more nodes once a fitting node is found.
|
||||
workqueue.ParallelizeUntil(ctx, workersCount, len(nodes), checkNode)
|
||||
|
||||
return filteredLen > 0
|
||||
}
|
||||
|
||||
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
|
||||
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
// Skip node pod is already on
|
||||
if node.Name == pod.Spec.NodeName {
|
||||
continue
|
||||
}
|
||||
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
klog.V(4).InfoS("Pod does not fit on node",
|
||||
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
|
||||
}
|
||||
|
||||
return false
|
||||
return podFitsNodes(nodeIndexer, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
|
||||
return pod.Spec.NodeName == node.Name
|
||||
})
|
||||
}
|
||||
|
||||
// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
|
||||
// to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
klog.V(4).InfoS("Pod does not fit on node",
|
||||
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
|
||||
}
|
||||
|
||||
return false
|
||||
return podFitsNodes(nodeIndexer, pod, nodes, nil)
|
||||
}
|
||||
|
||||
// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
|
||||
// to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
err := NodeFit(nodeIndexer, pod, node)
|
||||
if err == nil {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("Pod does not fit on node",
|
||||
"pod:", klog.KObj(pod), "node:", klog.KObj(node), "error:", utilerrors.NewAggregate(errors).Error())
|
||||
klog.V(4).InfoS("Pod does not fit on current node",
|
||||
"pod", klog.KObj(pod), "node", klog.KObj(node), "error", err)
|
||||
|
||||
return false
|
||||
}
|
||||
@@ -193,9 +210,7 @@ func IsNodeUnschedulable(node *v1.Node) bool {
|
||||
|
||||
// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
|
||||
// the pod will fit.
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
|
||||
var insufficientResources []error
|
||||
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
// Get pod requests
|
||||
podRequests, _ := utils.PodRequestsAndLimits(pod)
|
||||
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
|
||||
@@ -205,25 +220,22 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
|
||||
if err != nil {
|
||||
return false, []error{err}
|
||||
return false, err
|
||||
}
|
||||
|
||||
podFitsOnNode := true
|
||||
for _, resource := range resourceNames {
|
||||
podResourceRequest := podRequests[resource]
|
||||
availableResource, ok := availableResources[resource]
|
||||
if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
|
||||
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
|
||||
podFitsOnNode = false
|
||||
return false, fmt.Errorf("insufficient %v", resource)
|
||||
}
|
||||
}
|
||||
// Check the pod count; at least one pod slot must be available.
|
||||
if availableResources[v1.ResourcePods].MilliValue() <= 0 {
|
||||
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", v1.ResourcePods))
|
||||
podFitsOnNode = false
|
||||
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
|
||||
}
|
||||
|
||||
return podFitsOnNode, insufficientResources
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// nodeAvailableResources returns resources mapped to the quantity available on the node.
|
||||
@@ -323,3 +335,43 @@ func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) bool {
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
// podMatchesInterPodAntiAffinity checks if the pod matches the anti-affinity rule
|
||||
// of another pod that is already on the given node.
|
||||
// If a match is found, it returns true.
|
||||
func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error listing all pods: %v", err)
|
||||
}
|
||||
assignedPodsInNamespace := podutil.GroupByNamespace(podsOnNode)
|
||||
|
||||
for _, term := range utils.GetPodAntiAffinityTerms(pod.Spec.Affinity.PodAntiAffinity) {
|
||||
namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
|
||||
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
|
||||
return false, err
|
||||
}
|
||||
|
||||
for namespace := range namespaces {
|
||||
for _, assignedPod := range assignedPodsInNamespace[namespace] {
|
||||
if assignedPod.Name == pod.Name || !utils.PodMatchesTermsNamespaceAndSelector(assignedPod, namespaces, selector) {
|
||||
klog.V(4).InfoS("Pod doesn't match inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := node.Labels[term.TopologyKey]; ok {
|
||||
klog.V(1).InfoS("Pod matches inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
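To recap the refactor in this hunk: NodeFit now short-circuits on the first failing predicate and returns a single error rather than a slice. A hedged sketch of a caller under that contract, with nodeIndexer, pod and node assumed to be in scope:

// Sketch only: a nil error means every checked predicate passed.
if err := NodeFit(nodeIndexer, pod, node); err != nil {
	klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "err", err)
} else {
	klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
}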
|
||||
|
||||
@@ -19,6 +19,7 @@ package node
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -230,7 +231,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
// Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on.
|
||||
nodeNames := []string{"node1", "node2", "stagingNode"}
|
||||
nodeNames := []string{"node1", "node2", "stagingNode", "node4"}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
@@ -716,6 +717,151 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "There are four nodes. One node has a taint, and the other three nodes do not meet the resource requirements, should fail",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 3000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 3000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "There are four nodes. First node has a taint, second node has no label, third node do not meet the resource requirements, just fourth node meets the requirements, should success",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "There are four nodes. First node has a taint, second node has no label, third node do not meet the resource requirements, fourth node is the one where the pod is located, should fail",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[3], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -753,8 +899,60 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodFitsNodes(t *testing.T) {
|
||||
nodeNames := []string{"node1", "node2", "node3", "node4"}
|
||||
pod := test.BuildTestPod("p1", 950, 2*1000*1000*1000, nodeNames[0], nil)
|
||||
nodes := []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 1000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[1], 200, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[2], 300, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 400, 8*1000*1000*1000, 12, nil),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
objs = append(objs, pod)
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
var nodesTraversed sync.Map
|
||||
podFitsNodes(getPodsAssignedToNode, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
|
||||
nodesTraversed.Store(node.Name, node)
|
||||
return true
|
||||
})
|
||||
|
||||
for _, node := range nodes {
|
||||
if _, exists := nodesTraversed.Load(node.Name); !exists {
|
||||
t.Errorf("Node %v was not proccesed", node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeFit(t *testing.T) {
|
||||
node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)
|
||||
node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
})
|
||||
|
||||
nodeNolabel := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
@@ -763,7 +961,7 @@ func TestNodeFit(t *testing.T) {
|
||||
err error
|
||||
}{
|
||||
{
|
||||
description: "insufficient cpu",
|
||||
description: "Insufficient cpu",
|
||||
pod: test.BuildTestPod("p1", 10000, 2*1000*1000*1000, "", nil),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
@@ -772,7 +970,7 @@ func TestNodeFit(t *testing.T) {
|
||||
err: errors.New("insufficient cpu"),
|
||||
},
|
||||
{
|
||||
description: "insufficient pod num",
|
||||
description: "Insufficient pod num",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, "", nil),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
@@ -781,6 +979,47 @@ func TestNodeFit(t *testing.T) {
|
||||
},
|
||||
err: errors.New("insufficient pods"),
|
||||
},
|
||||
{
|
||||
description: "Pod matches inter-pod anti-affinity rule of other pod on node",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
},
|
||||
err: errors.New("pod matches inter-pod anti-affinity rule of other pod on node"),
|
||||
},
|
||||
{
|
||||
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because pod and other pod is not same namespace",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, func(pod *v1.Pod) {
|
||||
pod.Namespace = "test"
|
||||
}), "foo", "bar"),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because other pod not match labels of pod",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo1", "bar1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because node have no topologyKey",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, "node1", nil), "foo", "bar"),
|
||||
node: nodeNolabel,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod fits on node",
|
||||
pod: test.BuildTestPod("p1", 1000, 1000, "", func(pod *v1.Pod) {}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -804,8 +1043,9 @@ func TestNodeFit(t *testing.T) {
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
if errs := NodeFit(getPodsAssignedToNode, tc.pod, tc.node); (len(errs) == 0 && tc.err != nil) || errs[0].Error() != tc.err.Error() {
|
||||
t.Errorf("Test %#v failed, got %v, expect %v", tc.description, errs, tc.err)
|
||||
err = NodeFit(getPodsAssignedToNode, tc.pod, tc.node)
|
||||
if (err == nil && tc.err != nil) || (err != nil && err.Error() != tc.err.Error()) {
|
||||
t.Errorf("Test %#v failed, got %v, expect %v", tc.description, err, tc.err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -99,9 +99,6 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
|
||||
}
|
||||
}
|
||||
return func(pod *v1.Pod) bool {
|
||||
if o.filter != nil && !o.filter(pod) {
|
||||
return false
|
||||
}
|
||||
if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
|
||||
return false
|
||||
}
|
||||
@@ -111,6 +108,9 @@ func (o *Options) BuildFilterFunc() (FilterFunc, error) {
|
||||
if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
|
||||
return false
|
||||
}
|
||||
if o.filter != nil && !o.filter(pod) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}, nil
|
||||
}
|
||||
@@ -142,21 +142,25 @@ func BuildGetPodsAssignedToNodeFunc(podInformer cache.SharedIndexInformer) (GetP
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pods := make([]*v1.Pod, 0, len(objs))
|
||||
for _, obj := range objs {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if filter(pod) {
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
}
|
||||
return pods, nil
|
||||
return ConvertToPods(objs, filter), nil
|
||||
}
|
||||
return getPodsAssignedToNode, nil
|
||||
}
|
||||
|
||||
func ConvertToPods(objs []interface{}, filter FilterFunc) []*v1.Pod {
|
||||
pods := make([]*v1.Pod, 0, len(objs))
|
||||
for _, obj := range objs {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if filter == nil || filter(pod) {
|
||||
pods = append(pods, pod)
|
||||
}
|
||||
}
|
||||
return pods
|
||||
}
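A short, hedged sketch of using the extracted ConvertToPods helper outside the indexer callback; the "nodeName" index name and the filter are illustrative assumptions, not taken from this diff.

// Sketch only: objs would typically come from a SharedIndexInformer index lookup,
// as in BuildGetPodsAssignedToNodeFunc above; the index name is an assumption.
func podsOnNode(podInformer cache.SharedIndexInformer, nodeName string) ([]*v1.Pod, error) {
	objs, err := podInformer.GetIndexer().ByIndex("nodeName", nodeName)
	if err != nil {
		return nil, err
	}
	return ConvertToPods(objs, func(p *v1.Pod) bool {
		return p.DeletionTimestamp == nil // skip pods already being deleted
	}), nil
}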
|
||||
|
||||
// ListPodsOnNodes returns all pods on given nodes.
|
||||
func ListPodsOnNodes(nodes []*v1.Node, getPodsAssignedToNode GetPodsAssignedToNodeFunc, filter FilterFunc) ([]*v1.Pod, error) {
|
||||
pods := make([]*v1.Pod, 0)
|
||||
@@ -215,6 +219,14 @@ func OwnerRef(pod *v1.Pod) []metav1.OwnerReference {
|
||||
return pod.ObjectMeta.GetOwnerReferences()
|
||||
}
|
||||
|
||||
func OwnerRefUIDs(pod *v1.Pod) []string {
|
||||
var ownerRefUIDs []string
|
||||
for _, ownerRef := range OwnerRef(pod) {
|
||||
ownerRefUIDs = append(ownerRefUIDs, string(ownerRef.UID))
|
||||
}
|
||||
return ownerRefUIDs
|
||||
}
|
||||
|
||||
func IsBestEffortPod(pod *v1.Pod) bool {
|
||||
return utils.GetPodQOS(pod) == v1.PodQOSBestEffort
|
||||
}
|
||||
@@ -257,3 +269,12 @@ func SortPodsBasedOnAge(pods []*v1.Pod) {
|
||||
return pods[i].CreationTimestamp.Before(&pods[j].CreationTimestamp)
|
||||
})
|
||||
}
|
||||
|
||||
func GroupByNodeName(pods []*v1.Pod) map[string][]*v1.Pod {
|
||||
m := make(map[string][]*v1.Pod)
|
||||
for i := 0; i < len(pods); i++ {
|
||||
pod := pods[i]
|
||||
m[pod.Spec.NodeName] = append(m[pod.Spec.NodeName], pod)
|
||||
}
|
||||
return m
|
||||
}
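And a brief sketch of the new GroupByNodeName helper in use; pods is assumed to be a slice the caller has already filtered.

// Sketch only: log how many candidate pods sit on each node.
for nodeName, nodePods := range GroupByNodeName(pods) {
	klog.V(4).InfoS("Pods assigned to node", "node", nodeName, "count", len(nodePods))
}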
|
||||
|
||||
@@ -176,3 +176,67 @@ func TestSortPodsBasedOnAge(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGroupByNodeName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
pods []*v1.Pod
|
||||
expMap map[string][]*v1.Pod
|
||||
}{
|
||||
{
|
||||
name: "list of pods is empty",
|
||||
pods: []*v1.Pod{},
|
||||
expMap: map[string][]*v1.Pod{},
|
||||
},
|
||||
{
|
||||
name: "pods are on same node",
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node1",
|
||||
}},
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node1",
|
||||
}},
|
||||
},
|
||||
expMap: map[string][]*v1.Pod{"node1": {
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node1",
|
||||
}},
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node1",
|
||||
}},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "pods are on different nodes",
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node1",
|
||||
}},
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node2",
|
||||
}},
|
||||
},
|
||||
expMap: map[string][]*v1.Pod{
|
||||
"node1": {
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node1",
|
||||
}},
|
||||
},
|
||||
"node2": {
|
||||
{Spec: v1.PodSpec{
|
||||
NodeName: "node2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
resultMap := GroupByNodeName(test.pods)
|
||||
if !reflect.DeepEqual(resultMap, test.expMap) {
|
||||
t.Errorf("Expected %v node map, got %v", test.expMap, resultMap)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha2"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
@@ -54,7 +53,7 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
|
||||
internalPolicy := &api.DeschedulerPolicy{}
|
||||
var err error
|
||||
|
||||
decoder := scheme.Codecs.UniversalDecoder(v1alpha1.SchemeGroupVersion, v1alpha2.SchemeGroupVersion, api.SchemeGroupVersion)
|
||||
decoder := scheme.Codecs.UniversalDecoder(v1alpha2.SchemeGroupVersion, api.SchemeGroupVersion)
|
||||
if err := runtime.DecodeInto(decoder, policy, internalPolicy); err != nil {
|
||||
return nil, fmt.Errorf("failed decoding descheduler's policy config %q: %v", policyConfigFile, err)
|
||||
}
|
||||
|
||||
@@ -21,795 +21,37 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
func TestV1alpha1ToV1alpha2(t *testing.T) {
|
||||
SetupPlugins()
|
||||
defaultEvictorPluginConfig := api.PluginConfig{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
type testCase struct {
|
||||
description string
|
||||
policy *v1alpha1.DeschedulerPolicy
|
||||
err error
|
||||
result *api.DeschedulerPolicy
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "RemoveFailedPods enabled, LowNodeUtilization disabled strategies to profile",
|
||||
policy: &v1alpha1.DeschedulerPolicy{
|
||||
Strategies: v1alpha1.StrategyList{
|
||||
removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
Namespaces: &v1alpha1.Namespaces{
|
||||
Exclude: []string{
|
||||
"test2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: false,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||
Thresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(20),
|
||||
"memory": v1alpha1.Percentage(20),
|
||||
"pods": v1alpha1.Percentage(20),
|
||||
},
|
||||
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(50),
|
||||
"memory": v1alpha1.Percentage(50),
|
||||
"pods": v1alpha1.Percentage(50),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removeduplicates.PluginName,
|
||||
Args: &removeduplicates.RemoveDuplicatesArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Exclude: []string{
|
||||
"test2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{removeduplicates.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
// A disabled strategy does not generate an internal plugin since it is not currently used internally
|
||||
// {
|
||||
// Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
// PluginConfigs: []api.PluginConfig{
|
||||
// {
|
||||
// Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
// Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
// Thresholds: api.ResourceThresholds{
|
||||
// "cpu": api.Percentage(20),
|
||||
// [...]
|
||||
// [...]
|
||||
// },
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "convert all strategies",
|
||||
policy: &v1alpha1.DeschedulerPolicy{
|
||||
Strategies: v1alpha1.StrategyList{
|
||||
removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||
Thresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(20),
|
||||
"memory": v1alpha1.Percentage(20),
|
||||
"pods": v1alpha1.Percentage(20),
|
||||
},
|
||||
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(50),
|
||||
"memory": v1alpha1.Percentage(50),
|
||||
"pods": v1alpha1.Percentage(50),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeutilization.HighNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||
Thresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(20),
|
||||
"memory": v1alpha1.Percentage(20),
|
||||
"pods": v1alpha1.Percentage(20),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
removefailedpods.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
removepodshavingtoomanyrestarts.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
|
||||
PodRestartThreshold: 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
removepodsviolatinginterpodantiaffinity.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
removepodsviolatingnodeaffinity.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
},
|
||||
removepodsviolatingnodetaints.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
removepodsviolatingtopologyspreadconstraint.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.HighNodeUtilizationPluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(20),
|
||||
"memory": api.Percentage(20),
|
||||
"pods": api.Percentage(20),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{nodeutilization.HighNodeUtilizationPluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.LowNodeUtilizationPluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(20),
|
||||
"memory": api.Percentage(20),
|
||||
"pods": api.Percentage(20),
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(50),
|
||||
"memory": api.Percentage(50),
|
||||
"pods": api.Percentage(50),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{nodeutilization.LowNodeUtilizationPluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removeduplicates.PluginName,
|
||||
Args: &removeduplicates.RemoveDuplicatesArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{removeduplicates.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removefailedpods.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removefailedpods.PluginName,
|
||||
Args: &removefailedpods.RemoveFailedPodsArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removefailedpods.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodshavingtoomanyrestarts.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodshavingtoomanyrestarts.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatinginterpodantiaffinity.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatinginterpodantiaffinity.PluginName,
|
||||
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatinginterpodantiaffinity.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodeaffinity.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingnodeaffinity.PluginName,
|
||||
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatingnodeaffinity.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodetaints.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingnodetaints.PluginName,
|
||||
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatingnodetaints.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingtopologyspreadconstraint.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatingtopologyspreadconstraint.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "pass in all params to check args",
|
||||
policy: &v1alpha1.DeschedulerPolicy{
|
||||
Strategies: v1alpha1.StrategyList{
|
||||
removeduplicates.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
RemoveDuplicates: &v1alpha1.RemoveDuplicates{
|
||||
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||
Thresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(20),
|
||||
"memory": v1alpha1.Percentage(20),
|
||||
"pods": v1alpha1.Percentage(20),
|
||||
},
|
||||
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(50),
|
||||
"memory": v1alpha1.Percentage(50),
|
||||
"pods": v1alpha1.Percentage(50),
|
||||
},
|
||||
UseDeviationThresholds: true,
|
||||
NumberOfNodes: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
nodeutilization.HighNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||
Thresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(20),
|
||||
"memory": v1alpha1.Percentage(20),
|
||||
"pods": v1alpha1.Percentage(20),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
removefailedpods.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
FailedPods: &v1alpha1.FailedPods{
|
||||
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||
ExcludeOwnerKinds: []string{"Job"},
|
||||
Reasons: []string{"NodeAffinity"},
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
removepodshavingtoomanyrestarts.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
PodsHavingTooManyRestarts: &v1alpha1.PodsHavingTooManyRestarts{
|
||||
PodRestartThreshold: 100,
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
removepodsviolatinginterpodantiaffinity.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
removepodsviolatingnodeaffinity.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
},
|
||||
removepodsviolatingnodetaints.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
|
||||
},
|
||||
},
|
||||
removepodsviolatingtopologyspreadconstraint.PluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
IncludeSoftConstraints: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.HighNodeUtilizationPluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: nodeutilization.HighNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(20),
|
||||
"memory": api.Percentage(20),
|
||||
"pods": api.Percentage(20),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{nodeutilization.HighNodeUtilizationPluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", nodeutilization.LowNodeUtilizationPluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
UseDeviationThresholds: true,
|
||||
NumberOfNodes: 3,
|
||||
Thresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(20),
|
||||
"memory": api.Percentage(20),
|
||||
"pods": api.Percentage(20),
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
"cpu": api.Percentage(50),
|
||||
"memory": api.Percentage(50),
|
||||
"pods": api.Percentage(50),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{nodeutilization.LowNodeUtilizationPluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removeduplicates.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removeduplicates.PluginName,
|
||||
Args: &removeduplicates.RemoveDuplicatesArgs{
|
||||
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{removeduplicates.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removefailedpods.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removefailedpods.PluginName,
|
||||
Args: &removefailedpods.RemoveFailedPodsArgs{
|
||||
ExcludeOwnerKinds: []string{"Job"},
|
||||
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||
Reasons: []string{"NodeAffinity"},
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removefailedpods.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodshavingtoomanyrestarts.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodshavingtoomanyrestarts.PluginName,
|
||||
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
|
||||
PodRestartThreshold: 100,
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodshavingtoomanyrestarts.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatinginterpodantiaffinity.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatinginterpodantiaffinity.PluginName,
|
||||
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatinginterpodantiaffinity.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodeaffinity.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingnodeaffinity.PluginName,
|
||||
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
|
||||
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatingnodeaffinity.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingnodetaints.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingnodetaints.PluginName,
|
||||
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
|
||||
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatingnodetaints.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", removepodsviolatingtopologyspreadconstraint.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
|
||||
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
|
||||
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
|
||||
TopologyBalanceNodeFit: utilpointer.Bool(true),
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{removepodsviolatingtopologyspreadconstraint.PluginName},
|
||||
},
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "invalid strategy name",
|
||||
policy: &v1alpha1.DeschedulerPolicy{Strategies: v1alpha1.StrategyList{
|
||||
"InvalidName": v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{},
|
||||
},
|
||||
}},
|
||||
result: nil,
|
||||
err: fmt.Errorf("unknown strategy name: InvalidName"),
|
||||
},
|
||||
{
|
||||
description: "invalid threshold priority",
|
||||
policy: &v1alpha1.DeschedulerPolicy{Strategies: v1alpha1.StrategyList{
|
||||
nodeutilization.LowNodeUtilizationPluginName: v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
Params: &v1alpha1.StrategyParameters{
|
||||
ThresholdPriority: utilpointer.Int32(100),
|
||||
ThresholdPriorityClassName: "name",
|
||||
NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
|
||||
Thresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(20),
|
||||
"memory": v1alpha1.Percentage(20),
|
||||
"pods": v1alpha1.Percentage(20),
|
||||
},
|
||||
TargetThresholds: v1alpha1.ResourceThresholds{
|
||||
"cpu": v1alpha1.Percentage(50),
|
||||
"memory": v1alpha1.Percentage(50),
|
||||
"pods": v1alpha1.Percentage(50),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
result: nil,
|
||||
err: fmt.Errorf("priority threshold misconfigured for plugin LowNodeUtilization"),
|
||||
},
|
||||
}
|
||||
// scope contains information about an ongoing conversion.
type scope struct {
converter *conversion.Converter
meta *conversion.Meta
}

for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
result := &api.DeschedulerPolicy{}
scope := scope{}
err := v1alpha1.V1alpha1ToInternal(tc.policy, pluginregistry.PluginRegistry, result, scope)
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// sort to easily compare deepequality
result.Profiles = api.SortDeschedulerProfileByName(result.Profiles)
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
// Convert continues a conversion.
func (s scope) Convert(src, dest interface{}) error {
return s.converter.Convert(src, dest, s.meta)
}

// Meta returns the meta object that was originally passed to Convert.
func (s scope) Meta() *conversion.Meta {
return s.meta
}
|
||||
|
||||
func TestDecodeVersionedPolicy(t *testing.T) {
|
||||
client := fakeclientset.NewSimpleClientset()
|
||||
SetupPlugins()
|
||||
defaultEvictorPluginConfig := api.PluginConfig{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilpointer.Int32(utils.SystemCriticalPriority),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
description string
|
||||
policy []byte
|
||||
@@ -817,124 +59,6 @@ func TestDecodeVersionedPolicy(t *testing.T) {
|
||||
result *api.DeschedulerPolicy
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "v1alpha1 to internal",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 5
|
||||
namespaces:
|
||||
include:
|
||||
- "testleaderelection-a"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
defaultEvictorPluginConfig,
|
||||
{
|
||||
Name: podlifetime.PluginName,
|
||||
Args: &podlifetime.PodLifeTimeArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"testleaderelection-a"},
|
||||
},
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(5),
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{podlifetime.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "v1alpha1 to internal with priorityThreshold",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 5
|
||||
namespaces:
|
||||
include:
|
||||
- "testleaderelection-a"
|
||||
thresholdPriority: null
|
||||
thresholdPriorityClassName: prioritym
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: fmt.Sprintf("strategy-%s-profile", podlifetime.PluginName),
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: "DefaultEvictor",
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilpointer.Int32(0),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: podlifetime.PluginName,
|
||||
Args: &podlifetime.PodLifeTimeArgs{
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"testleaderelection-a"},
|
||||
},
|
||||
MaxPodLifeTimeSeconds: utilpointer.Uint(5),
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{podlifetime.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "v1alpha1 to internal with priorityThreshold value and name should return error",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 5
|
||||
namespaces:
|
||||
include:
|
||||
- "testleaderelection-a"
|
||||
thresholdPriority: 222
|
||||
thresholdPriorityClassName: prioritym
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("failed decoding descheduler's policy config \"filename\": priority threshold misconfigured for plugin PodLifeTime"),
|
||||
},
|
||||
{
|
||||
description: "v1alpha2 to internal",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
@@ -947,6 +71,7 @@ profiles:
|
||||
evictSystemCriticalPods: true
|
||||
evictFailedBarePods: true
|
||||
evictLocalStoragePods: true
|
||||
evictDaemonSetPods: true
|
||||
nodeFit: true
|
||||
- name: "RemovePodsHavingTooManyRestarts"
|
||||
args:
|
||||
@@ -968,7 +93,8 @@ profiles:
|
||||
EvictSystemCriticalPods: true,
|
||||
EvictFailedBarePods: true,
|
||||
EvictLocalStoragePods: true,
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
|
||||
EvictDaemonSetPods: true,
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
@@ -1099,6 +225,7 @@ profiles:
|
||||
evictSystemCriticalPods: true
|
||||
evictFailedBarePods: true
|
||||
evictLocalStoragePods: true
|
||||
evictDaemonSetPods: true
|
||||
nodeFit: true
|
||||
- name: "RemoveFailedPods"
|
||||
plugins:
|
||||
@@ -1123,14 +250,15 @@ profiles:
|
||||
EvictSystemCriticalPods: true,
|
||||
EvictFailedBarePods: true,
|
||||
EvictLocalStoragePods: true,
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
|
||||
EvictDaemonSetPods: true,
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: removefailedpods.PluginName,
|
||||
Args: &removefailedpods.RemoveFailedPodsArgs{
|
||||
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||
MinPodLifetimeSeconds: utilptr.To[uint](3600),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1161,6 +289,7 @@ profiles:
|
||||
evictSystemCriticalPods: true
|
||||
evictFailedBarePods: true
|
||||
evictLocalStoragePods: true
|
||||
evictDaemonSetPods: true
|
||||
nodeFit: true
|
||||
- name: "RemoveFailedPods"
|
||||
plugins:
|
||||
@@ -1179,14 +308,15 @@ profiles:
|
||||
EvictSystemCriticalPods: true,
|
||||
EvictFailedBarePods: true,
|
||||
EvictLocalStoragePods: true,
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilpointer.Int32(2000000000)},
|
||||
EvictDaemonSetPods: true,
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: removefailedpods.PluginName,
|
||||
Args: &removefailedpods.RemoveFailedPodsArgs{
|
||||
MinPodLifetimeSeconds: utilpointer.Uint(3600),
|
||||
MinPodLifetimeSeconds: utilptr.To[uint](3600),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha2"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
componentconfigv1alpha1 "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||
@@ -57,10 +56,8 @@ func init() {
|
||||
|
||||
utilruntime.Must(componentconfig.AddToScheme(Scheme))
|
||||
utilruntime.Must(componentconfigv1alpha1.AddToScheme(Scheme))
|
||||
utilruntime.Must(v1alpha1.AddToScheme(Scheme))
|
||||
utilruntime.Must(v1alpha2.AddToScheme(Scheme))
|
||||
utilruntime.Must(Scheme.SetVersionPriority(
|
||||
v1alpha2.SchemeGroupVersion,
|
||||
v1alpha1.SchemeGroupVersion,
|
||||
))
|
||||
}
|
||||
|
||||
@@ -46,10 +46,6 @@ func (hi *HandleImpl) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
return hi.EvictorFilterImpl.PreEvictionFilter(pod)
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
|
||||
func (hi *HandleImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) error {
|
||||
return hi.PodEvictorImpl.EvictPod(ctx, pod, opts)
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) NodeLimitExceeded(node *v1.Node) bool {
|
||||
return hi.PodEvictorImpl.NodeLimitExceeded(node)
|
||||
}
|
||||
|
||||
@@ -16,13 +16,16 @@ package defaultevictor
import (
// "context"
"context"
"errors"
"fmt"
"time"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -45,7 +48,7 @@ type constraint func(pod *v1.Pod) error
// This plugin is only meant to customize other actions (extension points) of the evictor,
// like filtering, sorting, and other ones that might be relevant in the future
type DefaultEvictor struct {
args runtime.Object
args *DefaultEvictorArgs
constraints []constraint
handle frameworktypes.Handle
}
@@ -62,15 +65,17 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
}

// New builds plugin from its arguments while passing a handle
// nolint: gocyclo
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
}

ev := &DefaultEvictor{}
ev.handle = handle
ev.args = defaultEvictorArgs
ev := &DefaultEvictor{
handle: handle,
args: defaultEvictorArgs,
}

if defaultEvictorArgs.EvictFailedBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
@@ -122,6 +127,15 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil
})
}
if !defaultEvictorArgs.EvictDaemonSetPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
if utils.IsDaemonsetPod(ownerRefList) {
return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
}
return nil
})
}
if defaultEvictorArgs.IgnorePvcPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if utils.IsPodWithPVC(pod) {
@@ -143,6 +157,44 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
})
}

if defaultEvictorArgs.MinReplicas > 1 {
indexName := "metadata.ownerReferences"
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
if err != nil {
return nil, err
}
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if len(pod.OwnerReferences) == 0 {
return nil
}

if len(pod.OwnerReferences) > 1 {
klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
return nil
}

ownerRef := pod.OwnerReferences[0]
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
if err != nil {
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
}

if uint(len(objs)) < defaultEvictorArgs.MinReplicas {
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), defaultEvictorArgs.MinReplicas)
}

return nil
})
}

if defaultEvictorArgs.MinPodAge != nil {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < defaultEvictorArgs.MinPodAge.Duration {
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", defaultEvictorArgs.MinPodAge.String())
}
return nil
})
}
return ev, nil
}
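Editorial aside, not part of the upstream diff: the two knobs wired into New() above, MinReplicas and MinPodAge, are plain fields on DefaultEvictorArgs (see the args type change further down in this diff). A minimal, hypothetical sketch of populating them; the main wrapper is an assumption, while the field names and the defaultevictor import path are taken from this diff.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
)

func main() {
	// MinReplicas > 1 activates the owner-replica constraint added in New() above;
	// MinPodAge skips pods whose StartTime is younger than the configured duration.
	args := &defaultevictor.DefaultEvictorArgs{
		NodeFit:     true,
		MinReplicas: 2,
		MinPodAge:   &metav1.Duration{Duration: 50 * time.Minute},
	}
	fmt.Printf("evictor args: %+v\n", args)
}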
@@ -152,9 +204,8 @@ func (d *DefaultEvictor) Name() string {
}

func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
defaultEvictorArgs := d.args.(*DefaultEvictorArgs)
if defaultEvictorArgs.NodeFit {
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), defaultEvictorArgs.NodeSelector)
if d.args.NodeFit {
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
if err != nil {
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
return false
@@ -175,11 +226,6 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
return true
}

ownerRefList := podutil.OwnerRef(pod)
if utils.IsDaemonsetPod(ownerRefList) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
}

if utils.IsMirrorPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
}
@@ -199,9 +245,36 @@ func TestDefaultEvictorFilter(t *testing.T) {
}

if len(checkErrs) > 0 {
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", errors.NewAggregate(checkErrs).Error())
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
return false
}

return true
}
|
||||
|
||||
func getPodIndexerByOwnerRefs(indexName string, handle frameworktypes.Handle) (cache.Indexer, error) {
podInformer := handle.SharedInformerFactory().Core().V1().Pods().Informer()
indexer := podInformer.GetIndexer()

// do not reinitialize the indexer, if it's been defined already
for name := range indexer.GetIndexers() {
if name == indexName {
return indexer, nil
}
}

if err := podInformer.AddIndexers(cache.Indexers{
indexName: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, errors.New("unexpected object")
}

return podutil.OwnerRefUIDs(pod), nil
},
}); err != nil {
return nil, err
}

return indexer, nil
}
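Rough usage sketch, not code from this diff: the minReplicas constraint in New() relies on the owner-UID index that getPodIndexerByOwnerRefs registers above. The snippet below rebuilds the same kind of index on a stand-alone cache.Indexer to show what ByIndex returns; the ownerRefIndex constant and the example pod are hypothetical.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

const ownerRefIndex = "metadata.ownerReferences"

func main() {
	// Stand-alone indexer with an index equivalent to the one the plugin registers above.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		ownerRefIndex: func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, fmt.Errorf("unexpected object %T", obj)
			}
			uids := make([]string, 0, len(pod.OwnerReferences))
			for _, ref := range pod.OwnerReferences {
				uids = append(uids, string(ref.UID))
			}
			return uids, nil
		},
	})

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:            "p1",
		Namespace:       "default",
		OwnerReferences: []metav1.OwnerReference{{UID: "owner-1"}},
	}}
	_ = indexer.Add(pod)

	// The minReplicas constraint performs the equivalent of this lookup
	// and compares len(objs) against MinReplicas.
	objs, _ := indexer.ByIndex(ownerRefIndex, "owner-1")
	fmt.Printf("pods sharing owner-1: %d\n", len(objs))
}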
@@ -15,11 +15,16 @@ package defaultevictor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -30,6 +35,20 @@ import (
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
minReplicas uint
|
||||
minPodAge *metav1.Duration
|
||||
result bool
|
||||
}
|
||||
|
||||
func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
|
||||
@@ -38,17 +57,6 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "datacenter"
|
||||
nodeLabelValue := "east"
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
result bool
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
@@ -304,45 +312,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range test.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range test.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
defaultEvictorArgs := &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: test.evictLocalStoragePods,
|
||||
EvictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: test.evictFailedBarePods,
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: test.priorityThreshold,
|
||||
},
|
||||
NodeFit: test.nodeFit,
|
||||
}
|
||||
|
||||
evictorPlugin, err := New(
|
||||
defaultEvictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
})
|
||||
evictorPlugin, err := initializePlugin(ctx, test)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
@@ -360,20 +330,12 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
lowPriority := int32(800)
|
||||
highPriority := int32(900)
|
||||
|
||||
minPodAge := metav1.Duration{Duration: 50 * time.Minute}
|
||||
|
||||
nodeTaintKey := "hardware"
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
type testCase struct {
|
||||
description string
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
result bool
|
||||
}
|
||||
ownerRefUUID := uuid.NewUUID()
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
@@ -527,7 +489,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is part of a daemonSet",
|
||||
description: "Pod not evicted because it is part of a daemonSet",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -538,7 +500,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
@@ -549,7 +511,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is a mirror poddsa",
|
||||
description: "Pod not evicted because it is a mirror poddsa",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -560,7 +522,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -572,7 +534,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has system critical priority",
|
||||
description: "Pod not evicted because it has system critical priority",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -584,7 +546,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -599,7 +561,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has a priority higher than the configured priority threshold",
|
||||
description: "Pod not evicted because it has a priority higher than the configured priority threshold",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -611,7 +573,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -624,7 +586,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true",
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -636,7 +598,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -649,7 +611,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -661,7 +623,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
@@ -704,6 +666,79 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minReplicas of 2, owner with 2 replicas, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minReplicas of 3, owner with 2 replicas, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
},
|
||||
minReplicas: 3,
|
||||
result: false,
|
||||
}, {
|
||||
description: "minReplicas of 2, multiple owners, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
description: "minPodAge of 50, pod created 10 minutes ago, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-10))
|
||||
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
|
||||
}),
|
||||
},
|
||||
minPodAge: &minPodAge,
|
||||
result: false,
|
||||
}, {
|
||||
description: "minPodAge of 50, pod created 60 minutes ago, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-60))
|
||||
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
|
||||
}),
|
||||
},
|
||||
minPodAge: &minPodAge,
|
||||
result: true,
|
||||
}, {
|
||||
description: "nil minPodAge, pod created 60 minutes ago, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
podStartTime := metav1.Now().Add(time.Minute * time.Duration(-60))
|
||||
pod.Status.StartTime = &metav1.Time{Time: podStartTime}
|
||||
}),
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -712,45 +747,7 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range test.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range test.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
defaultEvictorArgs := &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: test.evictLocalStoragePods,
|
||||
EvictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: test.evictFailedBarePods,
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: test.priorityThreshold,
|
||||
},
|
||||
NodeFit: test.nodeFit,
|
||||
}
|
||||
|
||||
evictorPlugin, err := New(
|
||||
defaultEvictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
})
|
||||
evictorPlugin, err := initializePlugin(ctx, test)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
@@ -762,3 +759,95 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReinitialization(t *testing.T) {
|
||||
n1 := test.BuildTestNode("node1", 1000, 2000, 13, nil)
|
||||
ownerRefUUID := uuid.NewUUID()
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
description: "minReplicas of 2, multiple owners, eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = append(test.GetNormalPodOwnerRefList(), test.GetNormalPodOwnerRefList()...)
|
||||
pod.ObjectMeta.OwnerReferences[0].UID = ownerRefUUID
|
||||
}),
|
||||
test.BuildTestPod("p2", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
evictorPlugin, err := initializePlugin(ctx, test)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
defaultEvictor, ok := evictorPlugin.(*DefaultEvictor)
|
||||
if !ok {
|
||||
t.Fatalf("Unable to initialize as a DefaultEvictor plugin")
|
||||
}
|
||||
_, err = New(defaultEvictor.args, defaultEvictor.handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to reinitialize the plugin: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin, error) {
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}

fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold,
},
NodeFit: test.nodeFit,
MinReplicas: test.minReplicas,
MinPodAge: test.minPodAge,
}

evictorPlugin, err := New(
defaultEvictorArgs,
&frameworkfake.HandleImpl{
ClientsetImpl: fakeClient,
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
SharedInformerFactoryImpl: sharedInformerFactory,
})
if err != nil {
return nil, fmt.Errorf("unable to initialize the plugin: %v", err)
}

return evictorPlugin, nil
}
|
||||
|
||||
@@ -31,6 +31,9 @@ func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
|
||||
if !args.EvictLocalStoragePods {
|
||||
args.EvictLocalStoragePods = false
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
args.EvictDaemonSetPods = false
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
args.EvictSystemCriticalPods = false
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/utils/pointer"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
@@ -35,6 +35,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
want: &DefaultEvictorArgs{
|
||||
NodeSelector: "",
|
||||
EvictLocalStoragePods: false,
|
||||
EvictDaemonSetPods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
@@ -48,24 +49,26 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
|
||||
in: &DefaultEvictorArgs{
|
||||
NodeSelector: "NodeSelector",
|
||||
EvictLocalStoragePods: true,
|
||||
EvictDaemonSetPods: true,
|
||||
EvictSystemCriticalPods: true,
|
||||
IgnorePvcPods: true,
|
||||
EvictFailedBarePods: true,
|
||||
LabelSelector: nil,
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: pointer.Int32(800),
|
||||
Value: utilptr.To[int32](800),
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
want: &DefaultEvictorArgs{
|
||||
NodeSelector: "NodeSelector",
|
||||
EvictLocalStoragePods: true,
|
||||
EvictDaemonSetPods: true,
|
||||
EvictSystemCriticalPods: true,
|
||||
IgnorePvcPods: true,
|
||||
EvictFailedBarePods: true,
|
||||
LabelSelector: nil,
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: pointer.Int32(800),
|
||||
Value: utilptr.To[int32](800),
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
|
||||
@@ -25,12 +25,15 @@ import (
|
||||
type DefaultEvictorArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
NodeSelector string `json:"nodeSelector"`
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods"`
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods"`
|
||||
IgnorePvcPods bool `json:"ignorePvcPods"`
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold"`
|
||||
NodeFit bool `json:"nodeFit"`
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
}
|
||||
|
||||
@@ -16,6 +16,8 @@ package defaultevictor
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -26,5 +28,9 @@ func ValidateDefaultEvictorArgs(obj runtime.Object) error {
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
}

if args.MinReplicas == 1 {
klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
}

return nil
}
|
||||
|
||||
@@ -41,6 +41,11 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
|
||||
*out = new(api.PriorityThreshold)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.MinPodAge != nil {
|
||||
in, out := &in.MinPodAge, &out.MinPodAge
|
||||
*out = new(v1.Duration)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
@@ -144,6 +145,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
|
||||
sourceNodes,
|
||||
highNodes,
|
||||
h.handle.Evictor(),
|
||||
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
|
||||
h.podFilter,
|
||||
resourceNames,
|
||||
continueEvictionCond)
|
||||
|
||||
@@ -25,16 +25,13 @@ import (
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
@@ -115,6 +112,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -167,6 +165,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -448,20 +447,16 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
for _, pod := range testCase.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range testCase.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
evictionFailed := false
|
||||
if len(testCase.evictedPods) > 0 {
|
||||
@@ -479,50 +474,6 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
testCase.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
NodeFit: true,
|
||||
}
|
||||
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
defaultevictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
},
|
||||
@@ -623,55 +574,16 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
|
||||
ctx,
|
||||
fakeClient,
|
||||
"policy/v1",
|
||||
false,
|
||||
&item.evictionsExpected,
|
||||
evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
|
||||
defaultevictor.DefaultEvictorArgs{},
|
||||
nil,
|
||||
item.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
}
|
||||
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
defaultevictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -191,6 +192,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
sourceNodes,
|
||||
lowNodes,
|
||||
l.handle.Evictor(),
|
||||
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
|
||||
l.podFilter,
|
||||
resourceNames,
|
||||
continueEvictionCond)
|
||||
|
||||
@@ -22,21 +22,18 @@ import (
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -175,18 +172,23 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "namespace1"
|
||||
}),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "namespace1"
|
||||
}),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "namespace1"
|
||||
}),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "namespace1"
|
||||
}),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "namespace1"
|
||||
}),
|
||||
// These won't be evicted.
|
||||
@@ -242,12 +244,15 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// TODO(zhifei92): add ownerRef for pod
|
||||
pod.Namespace = "namespace3"
|
||||
}),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// TODO(zhifei92): add ownerRef for pod
|
||||
pod.Namespace = "namespace4"
|
||||
}),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// TODO(zhifei92): add ownerRef for pod
|
||||
pod.Namespace = "namespace5"
|
||||
}),
|
||||
// These won't be evicted.
|
||||
@@ -271,6 +276,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -326,6 +332,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 300, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -396,6 +403,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -419,7 +427,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
// All pods are assumed to be burstable (tc.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
@@ -463,6 +471,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -538,6 +547,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
@@ -620,6 +630,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -684,6 +695,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -776,6 +788,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -827,6 +840,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
@@ -838,35 +852,27 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range test.nodes {
|
||||
for _, node := range tc.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range test.pods {
|
||||
for _, pod := range tc.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range test.evictedPods {
|
||||
for _, pod := range tc.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
}
|
||||
|
||||
evictionFailed := false
|
||||
if len(test.evictedPods) > 0 {
|
||||
if len(tc.evictedPods) > 0 {
|
||||
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.CreateAction)
|
||||
obj := getAction.GetObject()
|
||||
@@ -881,65 +887,26 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
policy.SchemeGroupVersion.String(),
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
test.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
NodeFit: true,
|
||||
}
|
||||
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
defaultEvictorFilterArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
UseDeviationThresholds: test.useDeviationThresholds,
|
||||
EvictableNamespaces: test.evictableNamespaces,
|
||||
Thresholds: tc.thresholds,
|
||||
TargetThresholds: tc.targetThresholds,
|
||||
UseDeviationThresholds: tc.useDeviationThresholds,
|
||||
EvictableNamespaces: tc.evictableNamespaces,
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
plugin.(frameworktypes.BalancePlugin).Balance(ctx, test.nodes)
plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)

podsEvicted := podEvictor.TotalEvicted()
if test.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", test.expectedPodsEvicted, podsEvicted)
if tc.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", tc.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
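The hunks above replace the hand-rolled informer factory, PodEvictor and default-evictor wiring with a single frameworktesting.InitFrameworkHandle call. The sketch below shows the shape of a test using that helper; the call form is copied from this diff, but the meaning of the two nil arguments and the exact helper signature are assumptions, so read it as an illustration of the migration rather than a definitive API reference.

package example

import (
	"context"
	"testing"

	"k8s.io/client-go/kubernetes/fake"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
	frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
)

func TestHandleWiring(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fakeClient := fake.NewSimpleClientset( /* nodes and pods elided */ )

	// One helper call stands in for the informer factory, PodEvictor and
	// defaultevictor plumbing the old tests assembled by hand.
	handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
		ctx,
		fakeClient,
		nil, // eviction limit options (assumed optional here)
		defaultevictor.DefaultEvictorArgs{NodeFit: true},
		nil, // last argument kept nil, as in the updated tests
	)
	if err != nil {
		t.Fatalf("Unable to initialize a framework handle: %v", err)
	}
	_, _ = handle, podEvictor
}

Plugins under test then receive the handle directly, as in plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{...}, handle) above.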
@@ -971,11 +938,14 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
var uint0, uint1 uint = 0, 1
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
evictionsExpected uint
|
||||
name string
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
evictionsExpected uint
|
||||
}{
|
||||
{
|
||||
name: "No taints",
|
||||
@@ -1031,6 +1001,26 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
},
|
||||
evictionsExpected: 1,
|
||||
},
|
||||
{
|
||||
name: "Pod which tolerates node taint, set maxPodsToEvictTotal(0), should not be expelled",
|
||||
nodes: []*v1.Node{n1, n3withTaints},
|
||||
pods: []*v1.Pod{
|
||||
// Node 1 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_2_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_3_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_4_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_5_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_6_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
test.BuildTestPod(fmt.Sprintf("pod_7_%s", n1.Name), 200, 0, n1.Name, test.SetRSOwnerRef),
|
||||
podThatToleratesTaint,
|
||||
// Node 3 pods
|
||||
test.BuildTestPod(fmt.Sprintf("pod_9_%s", n3withTaints.Name), 200, 0, n3withTaints.Name, test.SetRSOwnerRef),
|
||||
},
|
||||
maxPodsToEvictPerNode: &uint1,
maxPodsToEvictTotal: &uint0,
evictionsExpected: 0,
},
}

for _, item := range tests {
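The new maxPodsToEvictTotal(0) case exercises the global eviction cap together with the per-node cap. In the updated test bodies those limits travel through an options builder rather than positional NewPodEvictor arguments; the fragment below strings together the builder calls that appear in this diff. The variable names and how the resulting options value is consumed are illustrative assumptions.

package example

import (
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

// evictionLimits mirrors the limits used by the taint test above: at most one
// eviction per node, zero evictions in total, so the tolerating pod stays put.
func evictionLimits() {
	var uint0, uint1 uint = 0, 1
	opts := evictions.NewOptions().
		WithMaxPodsToEvictPerNode(&uint1).
		WithMaxPodsToEvictTotal(&uint0)
	_ = opts // passed to frameworktesting.InitFrameworkHandle in place of the old evictor wiring
}

The per-namespace variant, WithMaxPodsToEvictPerNamespace, shows up the same way in the PodLifeTime test changes further down.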
@@ -1045,56 +1035,16 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
|
||||
ctx,
|
||||
fakeClient,
|
||||
policy.SchemeGroupVersion.String(),
|
||||
false,
|
||||
&item.evictionsExpected,
|
||||
evictions.NewOptions().WithMaxPodsToEvictPerNode(&item.evictionsExpected),
|
||||
defaultevictor.DefaultEvictorArgs{NodeFit: true},
|
||||
nil,
|
||||
item.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
NodeFit: true,
|
||||
}
|
||||
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
defaultEvictorFilterArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
|
||||
@@ -222,6 +222,7 @@ func evictPodsFromSourceNodes(
|
||||
evictableNamespaces *api.Namespaces,
|
||||
sourceNodes, destinationNodes []NodeInfo,
|
||||
podEvictor frameworktypes.Evictor,
|
||||
evictOptions evictions.EvictOptions,
|
||||
podFilter func(pod *v1.Pod) bool,
|
||||
resourceNames []v1.ResourceName,
|
||||
continueEviction continueEvictionCond,
|
||||
@@ -273,8 +274,14 @@ func evictPodsFromSourceNodes(
|
||||
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
|
||||
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
|
||||
evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, continueEviction)
|
||||
|
||||
err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -286,8 +293,9 @@ func evictPods(
|
||||
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
|
||||
taintsOfLowNodes map[string][]v1.Taint,
|
||||
podEvictor frameworktypes.Evictor,
|
||||
evictOptions evictions.EvictOptions,
|
||||
continueEviction continueEvictionCond,
|
||||
) {
|
||||
) error {
|
||||
var excludedNamespaces sets.Set[string]
|
||||
if evictableNamespaces != nil {
|
||||
excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
|
||||
@@ -309,45 +317,52 @@ func evictPods(
|
||||
continue
|
||||
}
|
||||
|
||||
if preEvictionFilterWithOptions(pod) {
|
||||
if podEvictor.Evict(ctx, pod, evictions.EvictOptions{}) {
|
||||
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
|
||||
if !preEvictionFilterWithOptions(pod) {
|
||||
continue
|
||||
}
|
||||
err = podEvictor.Evict(ctx, pod, evictOptions)
|
||||
if err == nil {
|
||||
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
|
||||
|
||||
for name := range totalAvailableUsage {
|
||||
if name == v1.ResourcePods {
|
||||
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
} else {
|
||||
quantity := utils.GetResourceRequestQuantity(pod, name)
|
||||
nodeInfo.usage[name].Sub(quantity)
|
||||
totalAvailableUsage[name].Sub(quantity)
|
||||
}
|
||||
}
|
||||
|
||||
keysAndValues := []interface{}{
|
||||
"node", nodeInfo.node.Name,
|
||||
"CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
|
||||
"Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
|
||||
"Pods", nodeInfo.usage[v1.ResourcePods].Value(),
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(3).InfoS("Updated node usage", keysAndValues...)
|
||||
// check if pods can be still evicted
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
break
|
||||
for name := range totalAvailableUsage {
|
||||
if name == v1.ResourcePods {
|
||||
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
} else {
|
||||
quantity := utils.GetResourceRequestQuantity(pod, name)
|
||||
nodeInfo.usage[name].Sub(quantity)
|
||||
totalAvailableUsage[name].Sub(quantity)
|
||||
}
|
||||
}
|
||||
|
||||
keysAndValues := []interface{}{
|
||||
"node", nodeInfo.node.Name,
|
||||
"CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
|
||||
"Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
|
||||
"Pods", nodeInfo.usage[v1.ResourcePods].Value(),
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(3).InfoS("Updated node usage", keysAndValues...)
|
||||
// check if pods can be still evicted
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
break
|
||||
}
|
||||
continue
}
if podEvictor.NodeLimitExceeded(nodeInfo.node) {
return
switch err.(type) {
case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
return err
default:
klog.Errorf("eviction failed: %v", err)
}
}
}
return nil
}

// sortNodesByUsage sorts nodes based on usage according to the given plugin.

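Here evictPods starts returning an error and classifying failures by type instead of polling NodeLimitExceeded after every eviction: a node-limit error ends work on the current node, a total-limit error is propagated so the caller stops touching the remaining nodes, and anything else is only logged. The stand-alone sketch below reproduces that control flow with local stand-in error types named after the ones in the diff; it is not the descheduler's own implementation.

package main

import "log"

// Stand-ins for the limit errors referenced in the diff
// (*evictions.EvictionNodeLimitError and *evictions.EvictionTotalLimitError).
type evictionNodeLimitError struct{}

func (e *evictionNodeLimitError) Error() string { return "node eviction limit exceeded" }

type evictionTotalLimitError struct{}

func (e *evictionTotalLimitError) Error() string { return "total eviction limit exceeded" }

// evictAll mirrors the new flow: successful evictions continue, a recognized
// limit error aborts the loop and is handed back to the caller, and any other
// failure is logged without stopping the loop.
func evictAll(pods []string, evict func(pod string) error) error {
	for _, pod := range pods {
		err := evict(pod)
		if err == nil {
			continue
		}
		switch err.(type) {
		case *evictionNodeLimitError, *evictionTotalLimitError:
			return err
		default:
			log.Printf("eviction failed: %v", err)
		}
	}
	return nil
}

func main() {
	budget := 1 // pretend the total eviction limit is one pod
	err := evictAll([]string{"p1", "p2", "p3"}, func(pod string) error {
		if budget == 0 {
			return &evictionTotalLimitError{}
		}
		budget--
		return nil
	})
	log.Printf("stopped with: %v", err) // stopped with: total eviction limit exceeded
}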
@@ -24,15 +24,15 @@ import (
|
||||
type LowNodeUtilizationArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
UseDeviationThresholds bool `json:"useDeviationThresholds"`
|
||||
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
// but then filtered out before eviction
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces"`
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
@@ -42,9 +42,9 @@ type HighNodeUtilizationArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
// but then filtered out before eviction
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces"`
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/utils/pointer"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
@@ -47,7 +47,7 @@ func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
MaxPodLifeTimeSeconds: pointer.Uint(600),
|
||||
MaxPodLifeTimeSeconds: utilptr.To[uint](600),
|
||||
States: []string{"Pending"},
|
||||
},
|
||||
want: &PodLifeTimeArgs{
|
||||
@@ -55,7 +55,7 @@ func TestSetDefaults_PodLifeTimeArgs(t *testing.T) {
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
MaxPodLifeTimeSeconds: pointer.Uint(600),
MaxPodLifeTimeSeconds: utilptr.To[uint](600),
States: []string{"Pending"},
},
},

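The two hunks above are part of a mechanical swap of the deprecated k8s.io/utils/pointer helpers for the generic k8s.io/utils/ptr package, so pointer.Uint(600) becomes utilptr.To[uint](600). A minimal example of the new form:

package example

import (
	utilptr "k8s.io/utils/ptr"
)

// Fields such as MaxPodLifeTimeSeconds are *uint, so tests need a pointer to a
// literal. The single generic helper replaces the per-type pointer.Uint,
// pointer.Int32, pointer.String, ... functions.
var maxPodLifeTimeSeconds = utilptr.To[uint](600)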
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
@@ -67,17 +68,42 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
||||
return podAgeSeconds > *podLifeTimeArgs.MaxPodLifeTimeSeconds
|
||||
podAgeSeconds := int(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
|
||||
return podAgeSeconds > int(*podLifeTimeArgs.MaxPodLifeTimeSeconds)
|
||||
})
|
||||
|
||||
if len(podLifeTimeArgs.States) > 0 {
|
||||
states := sets.New(podLifeTimeArgs.States...)
|
||||
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
// Pod Status Phase
|
||||
if states.Has(string(pod.Status.Phase)) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Pod Status Reason
|
||||
if states.Has(pod.Status.Reason) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Init Container Status Reason
|
||||
if podLifeTimeArgs.IncludingInitContainers {
|
||||
for _, containerStatus := range pod.Status.InitContainerStatuses {
|
||||
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Ephemeral Container Status Reason
|
||||
if podLifeTimeArgs.IncludingEphemeralContainers {
|
||||
for _, containerStatus := range pod.Status.EphemeralContainerStatuses {
|
||||
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}

// Container Status Reason
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.State.Waiting != nil && states.Has(containerStatus.State.Waiting.Reason) {
return true
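The filter above now matches a pod against the configured states in several places: the pod phase, the pod status reason, and waiting reasons on init, ephemeral and regular containers, with the init and ephemeral checks gated on the includingInitContainers and includingEphemeralContainers flags. The function below is a simplified stand-alone restatement of that predicate using the same Kubernetes API types; it is not the plugin's actual filter.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// matchesStates reports whether a pod is in one of the configured states,
// checking (in order) the pod phase, the pod status reason, and container
// waiting reasons. Init and ephemeral container statuses are only consulted
// when the corresponding flag is enabled, mirroring the plugin arguments.
func matchesStates(pod *v1.Pod, states sets.Set[string], includeInit, includeEphemeral bool) bool {
	if states.Has(string(pod.Status.Phase)) || states.Has(pod.Status.Reason) {
		return true
	}
	waitingReasonMatches := func(statuses []v1.ContainerStatus) bool {
		for _, cs := range statuses {
			if cs.State.Waiting != nil && states.Has(cs.State.Waiting.Reason) {
				return true
			}
		}
		return false
	}
	if includeInit && waitingReasonMatches(pod.Status.InitContainerStatuses) {
		return true
	}
	if includeEphemeral && waitingReasonMatches(pod.Status.EphemeralContainerStatuses) {
		return true
	}
	return waitingReasonMatches(pod.Status.ContainerStatuses)
}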
@@ -123,9 +149,19 @@ func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *framewo
|
||||
// in the event that PDB or settings such maxNoOfPodsToEvictPer* prevent too much eviction
|
||||
podutil.SortPodsBasedOnAge(podsToEvict)
|
||||
|
||||
loop:
|
||||
for _, pod := range podsToEvict {
|
||||
if !d.handle.Evictor().NodeLimitExceeded(nodeMap[pod.Spec.NodeName]) {
|
||||
d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
|
||||
err := d.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionNodeLimitError:
|
||||
continue loop
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -22,17 +22,14 @@ import (
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/events"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -72,7 +69,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p5.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
|
||||
p6.Namespace = "dev"
|
||||
p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 605))
|
||||
p6.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(-time.Second * 605))
|
||||
|
||||
ownerRef3 := test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = ownerRef3
|
||||
@@ -84,7 +81,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p7.ObjectMeta.CreationTimestamp = newerPodCreationTime
|
||||
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
|
||||
p8.Namespace = "dev"
|
||||
p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(time.Second * 595))
|
||||
p8.ObjectMeta.CreationTimestamp = metav1.NewTime(time.Now().Add(-time.Second * 595))
|
||||
|
||||
ownerRef4 := test.GetReplicaSetOwnerRefList()
|
||||
p5.ObjectMeta.OwnerReferences = ownerRef4
|
||||
@@ -140,6 +137,12 @@ func TestPodLifeTime(t *testing.T) {
|
||||
p14.DeletionTimestamp = &metav1.Time{}
|
||||
p15.DeletionTimestamp = &metav1.Time{}
|
||||
|
||||
p16 := test.BuildTestPod("p16", 100, 0, node1.Name, nil)
|
||||
p16.Namespace = "dev"
|
||||
p16.ObjectMeta.CreationTimestamp = olderPodCreationTime
|
||||
p16.Status.Phase = v1.PodUnknown
|
||||
p16.ObjectMeta.OwnerReferences = ownerRef1
|
||||
|
||||
var maxLifeTime uint = 600
|
||||
testCases := []struct {
|
||||
description string
|
||||
@@ -150,6 +153,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
ignorePvcPods bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
applyPodsFunc func(pods []*v1.Pod)
|
||||
}{
|
||||
{
|
||||
@@ -308,6 +312,17 @@ func TestPodLifeTime(t *testing.T) {
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "1 Oldest pod should be evicted when maxPodsToEvictTotal is set to 1",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
},
|
||||
pods: []*v1.Pod{p1, p2, p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](2),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
expectedEvictedPodCount: 1,
|
||||
},
|
||||
{
|
||||
description: "1 Oldest pod should be evicted when maxPodsToEvictPerNode is set to 1",
|
||||
args: &PodLifeTimeArgs{
|
||||
@@ -337,6 +352,244 @@ func TestPodLifeTime(t *testing.T) {
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with container status CrashLoopBackOff should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CrashLoopBackOff"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with container status CreateContainerConfigError should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CreateContainerConfigError"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with container status ErrImagePull should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"ErrImagePull"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "ErrImagePull"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with init container status CreateContainerError should not be evicted without includingInitContainers",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CreateContainerError"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with init container status CreateContainerError should be evicted with includingInitContainers",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CreateContainerError"},
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with ephemeral container status CreateContainerError should not be evicted without includingEphemeralContainers",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CreateContainerError"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.InitContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with ephemeral container status CreateContainerError should be evicted with includingEphemeralContainers",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CreateContainerError"},
|
||||
IncludingEphemeralContainers: true,
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.EphemeralContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with container status CreateContainerError should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"CreateContainerError"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerError"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with container status InvalidImageName should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"InvalidImageName"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.ContainerStatuses = []v1.ContainerStatus{
|
||||
{
|
||||
State: v1.ContainerState{
|
||||
Waiting: &v1.ContainerStateWaiting{Reason: "InvalidImageName"},
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status reason NodeLost should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"NodeLost"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Reason = "NodeLost"
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status reason NodeAffinity should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"NodeAffinity"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Reason = "NodeAffinity"
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status reason Shutdown should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"Shutdown"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Reason = "Shutdown"
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status reason UnexpectedAdmissionError should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{"UnexpectedAdmissionError"},
|
||||
},
|
||||
pods: []*v1.Pod{p9},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Reason = "UnexpectedAdmissionError"
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status phase v1.PodUnknown should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{string(v1.PodUnknown)},
|
||||
},
|
||||
pods: []*v1.Pod{p16},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Phase = v1.PodUnknown
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod without ImagePullBackOff States should be ignored",
|
||||
args: &PodLifeTimeArgs{
|
||||
@@ -376,55 +629,21 @@ func TestPodLifeTime(t *testing.T) {
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
|
||||
ctx,
|
||||
fakeClient,
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.maxPodsToEvictPerNamespace,
|
||||
tc.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: tc.ignorePvcPods,
|
||||
EvictFailedBarePods: false,
|
||||
}
|
||||
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
defaultEvictorFilterArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
evictions.NewOptions().
|
||||
WithMaxPodsToEvictPerNode(tc.maxPodsToEvictPerNode).
|
||||
WithMaxPodsToEvictPerNamespace(tc.maxPodsToEvictPerNamespace).
|
||||
WithMaxPodsToEvictTotal(tc.maxPodsToEvictTotal),
|
||||
defaultevictor.DefaultEvictorArgs{IgnorePvcPods: tc.ignorePvcPods},
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := New(tc.args, &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
})
|
||||
plugin, err := New(tc.args, handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
@@ -25,8 +25,10 @@ import (
|
||||
type PodLifeTimeArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
Namespaces *api.Namespaces `json:"namespaces"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds"`
|
||||
States []string `json:"states"`
|
||||
Namespaces *api.Namespaces `json:"namespaces,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
|
||||
States []string `json:"states,omitempty"`
|
||||
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
|
||||
IncludingEphemeralContainers bool `json:"includingEphemeralContainers,omitempty"`
|
||||
}
|
||||
|
||||
@@ -44,15 +44,29 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
}
|
||||
}
|
||||
podLifeTimeAllowedStates := sets.New(
|
||||
// Pod Status Phase
|
||||
string(v1.PodRunning),
|
||||
string(v1.PodPending),
|
||||
string(v1.PodUnknown),
|
||||
|
||||
// Pod Status Reasons
|
||||
"NodeAffinity",
|
||||
"NodeLost",
|
||||
"Shutdown",
|
||||
"UnexpectedAdmissionError",
|
||||
|
||||
// Container Status Reasons
|
||||
// Container state reasons: https://github.com/kubernetes/kubernetes/blob/release-1.24/pkg/kubelet/kubelet_pods.go#L76-L79
|
||||
"PodInitializing",
|
||||
"ContainerCreating",
|
||||
|
||||
// containerStatuses[*].state.waiting.reason: ImagePullBackOff
|
||||
// containerStatuses[*].state.waiting.reason: ImagePullBackOff, etc.
|
||||
"ImagePullBackOff",
|
||||
"CrashLoopBackOff",
|
||||
"CreateContainerConfigError",
|
||||
"ErrImagePull",
|
||||
"CreateContainerError",
|
||||
"InvalidImageName",
|
||||
)
|
||||
|
||||
if !podLifeTimeAllowedStates.HasAll(args.States...) {
|
||||
|
||||
@@ -36,6 +36,14 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "Pod Status Reasons CrashLoopBackOff ",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{"CrashLoopBackOff"},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
|
||||
@@ -210,9 +210,17 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
// It's assumed all duplicated pods are in the same priority class
|
||||
// TODO(jchaloup): check if the pod has a different node to lend to
|
||||
for _, pod := range pods[upperAvg-1:] {
|
||||
r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
|
||||
if r.handle.Evictor().NodeLimitExceeded(nodeMap[nodeName]) {
|
||||
err := r.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{StrategyName: PluginName})
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionNodeLimitError:
|
||||
continue loop
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -238,7 +246,7 @@ func getTargetNodes(podNodes map[string][]*v1.Pod, nodes []*v1.Node) []*v1.Node
|
||||
utils.NodeSelectorsEqual(getNodeAffinityNodeSelector(pod), getNodeAffinityNodeSelector(dp)) &&
|
||||
reflect.DeepEqual(pod.Spec.NodeSelector, dp.Spec.NodeSelector) {
|
||||
duplicated = true
|
||||
continue
|
||||
break
|
||||
}
|
||||
}
|
||||
if duplicated {
|
||||
|
||||
@@ -20,21 +20,15 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"k8s.io/client-go/tools/events"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
@@ -300,55 +294,9 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: testCase.nodefit}, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
"v1",
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
testCase.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
nodeFit := testCase.nodefit
|
||||
|
||||
defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
NodeFit: nodeFit,
|
||||
}
|
||||
|
||||
evictorFilter, _ := defaultevictor.New(
|
||||
defaultEvictorFilterArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := New(&RemoveDuplicatesArgs{
|
||||
@@ -749,55 +697,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
}
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{}, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
fakeClient,
|
||||
policyv1.SchemeGroupVersion.String(),
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
testCase.nodes,
|
||||
false,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
defaultEvictorFilterArgs := &defaultevictor.DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: false,
|
||||
EvictSystemCriticalPods: false,
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
}
|
||||
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
defaultEvictorFilterArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
SharedInformerFactoryImpl: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
handle := &frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
|
||||
PodEvictorImpl: podEvictor,
|
||||
EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
SharedInformerFactoryImpl: sharedInformerFactory,
t.Fatalf("Unable to initialize a framework handle: %v", err)
}

plugin, err := New(&RemoveDuplicatesArgs{},

@@ -24,6 +24,6 @@ import (
type RemoveDuplicatesArgs struct {
metav1.TypeMeta `json:",inline"`

Namespaces *api.Namespaces `json:"namespaces"`
ExcludeOwnerKinds []string `json:"excludeOwnerKinds"`
Namespaces *api.Namespaces `json:"namespaces,omitempty"`
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}

pkg/framework/plugins/removeduplicates/validation_test.go (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
package removeduplicates
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
args *RemoveDuplicatesArgs
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
description: "valid namespace args, no errors",
|
||||
args: &RemoveDuplicatesArgs{
|
||||
ExcludeOwnerKinds: []string{"Job"},
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"default"},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid namespaces args, expects error",
|
||||
args: &RemoveDuplicatesArgs{
|
||||
ExcludeOwnerKinds: []string{"Job"},
|
||||
Namespaces: &api.Namespaces{
|
||||
Include: []string{"default"},
|
||||
Exclude: []string{"kube-system"},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateRemoveDuplicatesArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -15,7 +15,7 @@ package removefailedpods
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
@@ -36,7 +36,7 @@ func SetDefaults_RemoveFailedPodsArgs(obj runtime.Object) {
|
||||
args.ExcludeOwnerKinds = nil
|
||||
}
|
||||
if args.MinPodLifetimeSeconds == nil {
|
||||
args.MinPodLifetimeSeconds = utilpointer.Uint(3600)
|
||||
args.MinPodLifetimeSeconds = utilptr.To[uint](3600)
|
||||
}
|
||||
if args.Reasons == nil {
|
||||
args.Reasons = nil
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/utils/pointer"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestSetDefaults_RemoveFailedPodsArgs(t *testing.T) {
|
||||
Namespaces: &api.Namespaces{},
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||
MinPodLifetimeSeconds: pointer.Uint(0),
|
||||
MinPodLifetimeSeconds: utilptr.To[uint](0),
|
||||
Reasons: []string{"reason"},
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
@@ -56,7 +56,7 @@ func TestSetDefaults_RemoveFailedPodsArgs(t *testing.T) {
|
||||
Namespaces: &api.Namespaces{},
|
||||
LabelSelector: &metav1.LabelSelector{},
|
||||
ExcludeOwnerKinds: []string{"ReplicaSet"},
|
||||
MinPodLifetimeSeconds: pointer.Uint(0),
|
||||
MinPodLifetimeSeconds: utilptr.To[uint](0),
|
||||
Reasons: []string{"reason"},
|
||||
IncludingInitContainers: true,
|
||||
},
|
||||
|
||||
@@ -102,10 +102,19 @@ func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *fr
|
||||
}
|
||||
}
|
||||
totalPods := len(pods)
|
||||
loop:
|
||||
for i := 0; i < totalPods; i++ {
|
||||
d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{})
|
||||
if d.handle.Evictor().NodeLimitExceeded(node) {
|
||||
break
|
||||
err := d.handle.Evictor().Evict(ctx, pods[i], evictions.EvictOptions{StrategyName: PluginName})
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionNodeLimitError:
|
||||
break loop
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -148,6 +157,17 @@ func validateCanEvict(pod *v1.Pod, failedPodArgs *RemoveFailedPodsArgs) error {
|
||||
}
|
||||
}
|
||||
|
||||
if len(failedPodArgs.ExitCodes) > 0 {
|
||||
exitCodes := getFailedContainerStatusExitCodes(pod.Status.ContainerStatuses)
|
||||
if failedPodArgs.IncludingInitContainers {
|
||||
exitCodes = append(exitCodes, getFailedContainerStatusExitCodes(pod.Status.InitContainerStatuses)...)
|
||||
}
|
||||
|
||||
if !sets.New(failedPodArgs.ExitCodes...).HasAny(exitCodes...) {
|
||||
errs = append(errs, fmt.Errorf("pod does not match any of the exitCodes"))
|
||||
}
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
@@ -165,3 +185,15 @@ func getFailedContainerStatusReasons(containerStatuses []v1.ContainerStatus) []s
|
||||
|
||||
return reasons
|
||||
}
|
||||
|
||||
func getFailedContainerStatusExitCodes(containerStatuses []v1.ContainerStatus) []int32 {
exitCodes := make([]int32, 0)

for _, containerStatus := range containerStatuses {
if containerStatus.State.Terminated != nil {
exitCodes = append(exitCodes, containerStatus.State.Terminated.ExitCode)
}
}

return exitCodes
}

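The new exitCodes option gathers terminated exit codes from the pod's containers, optionally including init containers, and only lets the pod be evicted when at least one code is listed in the args. The sketch below condenses that check into one helper; the reduced args struct is an illustration, while the real plugin reads the same fields from RemoveFailedPodsArgs.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

type failedPodArgs struct {
	ExitCodes               []int32
	IncludingInitContainers bool
}

// matchesExitCodes mirrors the validateCanEvict logic added in this diff:
// collect terminated exit codes, optionally including init containers, then
// require an intersection with the configured codes.
func matchesExitCodes(pod *v1.Pod, args failedPodArgs) bool {
	if len(args.ExitCodes) == 0 {
		return true // no exit-code constraint configured
	}
	codes := terminatedExitCodes(pod.Status.ContainerStatuses)
	if args.IncludingInitContainers {
		codes = append(codes, terminatedExitCodes(pod.Status.InitContainerStatuses)...)
	}
	return sets.New(args.ExitCodes...).HasAny(codes...)
}

func terminatedExitCodes(statuses []v1.ContainerStatus) []int32 {
	var codes []int32
	for _, cs := range statuses {
		if cs.State.Terminated != nil {
			codes = append(codes, cs.State.Terminated.ExitCode)
		}
	}
	return codes
}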
@@ -21,18 +21,13 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/tools/events"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -41,12 +36,15 @@ var OneHourInSeconds uint = 3600
|
||||
func TestRemoveFailedPods(t *testing.T) {
|
||||
createRemoveFailedPodsArgs := func(
|
||||
includingInitContainers bool,
|
||||
reasons, excludeKinds []string,
|
||||
reasons []string,
|
||||
exitCodes []int32,
|
||||
excludeKinds []string,
|
||||
minAgeSeconds *uint,
|
||||
) RemoveFailedPodsArgs {
|
||||
return RemoveFailedPodsArgs{
|
||||
IncludingInitContainers: includingInitContainers,
|
||||
Reasons: reasons,
|
||||
ExitCodes: exitCodes,
|
||||
MinPodLifetimeSeconds: minAgeSeconds,
|
||||
ExcludeOwnerKinds: excludeKinds,
|
||||
}
|
||||
@@ -69,14 +67,14 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
},
|
||||
{
|
||||
description: "0 failures, 0 evictions",
|
||||
args: createRemoveFailedPodsArgs(false, nil, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
|
||||
},
|
||||
{
|
||||
description: "1 container terminated with reason NodeAffinity, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, nil, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
@@ -85,9 +83,70 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 container terminated with exitCode 1, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, nil, []int32{1}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 container terminated with exitCode 1, reason NodeAffinity, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, nil, []int32{1}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity", ExitCode: 1},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 container terminated with exitCode 1, expected exitCode 2, 0 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, nil, []int32{2}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "2 containers terminated with exitCode 1 and 2 respectively, 2 evictions",
|
||||
args: createRemoveFailedPodsArgs(false, nil, []int32{1, 2, 3}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 2,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
|
||||
}), nil),
|
||||
buildTestPod("p2", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 2},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "2 containers terminated with exitCode 1 and 2 respectively, expected exitCode 1, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, nil, []int32{1}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
|
||||
}), nil),
|
||||
buildTestPod("p2", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 2},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 init container terminated with reason NodeAffinity, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(true, nil, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(true, nil, nil, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
@@ -96,9 +155,31 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
}, nil), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 init container terminated with exitCode 1, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(true, nil, []int32{1}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 1},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 init container terminated with exitCode 2, expected 1, 0 eviction",
|
||||
args: createRemoveFailedPodsArgs(true, nil, []int32{1}, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{ExitCode: 2},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 init container waiting with reason CreateContainerConfigError, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(true, nil, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(true, nil, nil, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
@@ -109,7 +190,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
},
|
||||
{
|
||||
description: "2 init container waiting with reason CreateContainerConfigError, 2 nodes, 2 evictions",
|
||||
args: createRemoveFailedPodsArgs(true, nil, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(true, nil, nil, nil, nil),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("node2", 2000, 3000, 10, nil),
|
||||
@@ -126,7 +207,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
},
|
||||
{
|
||||
description: "include reason=CreateContainerConfigError, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
@@ -137,7 +218,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
},
|
||||
{
|
||||
description: "include reason=CreateContainerConfigError+NodeAffinity, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil),
|
||||
args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
|
||||
expectedEvictedPodCount: 1,
|
||||
pods: []*v1.Pod{
|
||||
@@ -148,7 +229,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
},
|
||||
{
|
||||
description: "include reason=CreateContainerConfigError, 1 container terminated with reason NodeAffinity, 0 eviction",
|
||||
args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
|
||||
+            args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -159,7 +240,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "include init container=false, 1 init container waiting with reason CreateContainerConfigError, 0 eviction",
-            args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
+            args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -170,7 +251,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "lifetime 1 hour, 1 container terminated with reason NodeAffinity, 0 eviction",
-            args: createRemoveFailedPodsArgs(false, nil, nil, &OneHourInSeconds),
+            args: createRemoveFailedPodsArgs(false, nil, nil, nil, &OneHourInSeconds),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -181,7 +262,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "nodeFit=true, 1 unschedulable node, 1 container terminated with reason NodeAffinity, 0 eviction",
-            args: createRemoveFailedPodsArgs(false, nil, nil, nil),
+            args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
             nodes: []*v1.Node{
                 test.BuildTestNode("node1", 2000, 3000, 10, nil),
                 test.BuildTestNode("node2", 2000, 2000, 10, func(node *v1.Node) {
@@ -198,7 +279,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "nodeFit=true, only available node does not have enough resources, 1 container terminated with reason CreateContainerConfigError, 0 eviction",
-            args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil),
+            args: createRemoveFailedPodsArgs(false, []string{"CreateContainerConfigError"}, nil, nil, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 1, 1, 10, nil), test.BuildTestNode("node2", 0, 0, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -210,7 +291,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction",
-            args: createRemoveFailedPodsArgs(true, nil, []string{"ReplicaSet"}, nil),
+            args: createRemoveFailedPodsArgs(true, nil, nil, []string{"ReplicaSet"}, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -221,7 +302,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 eviction",
-            args: createRemoveFailedPodsArgs(true, nil, []string{"DaemonSet"}, nil),
+            args: createRemoveFailedPodsArgs(true, nil, nil, []string{"DaemonSet"}, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 1,
             pods: []*v1.Pod{
@@ -232,7 +313,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 pod in termination; nothing should be moved",
-            args: createRemoveFailedPodsArgs(true, nil, []string{"DaemonSet"}, nil),
+            args: createRemoveFailedPodsArgs(true, nil, nil, []string{"DaemonSet"}, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -243,7 +324,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "1 container terminated with reason ShutDown, 0 evictions",
-            args: createRemoveFailedPodsArgs(false, nil, nil, nil),
+            args: createRemoveFailedPodsArgs(false, nil, nil, nil, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 0,
             pods: []*v1.Pod{
@@ -253,7 +334,7 @@ func TestRemoveFailedPods(t *testing.T) {
         },
         {
             description: "include reason=Shutdown, 2 containers terminated with reason ShutDown, 2 evictions",
-            args: createRemoveFailedPodsArgs(false, []string{"Shutdown"}, nil, nil),
+            args: createRemoveFailedPodsArgs(false, []string{"Shutdown"}, nil, nil, nil),
             nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
             expectedEvictedPodCount: 2,
             pods: []*v1.Pod{
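
Note: every createRemoveFailedPodsArgs call site above gains one extra nil in the third position, matching the ExitCodes field added to RemoveFailedPodsArgs later in this diff. The helper itself is not shown in these hunks; the following is only a sketch of what its updated signature could look like, with the parameter names, ordering, and return type inferred from the call sites rather than taken from the PR:

// Sketch only: signature and return type inferred from the call sites above,
// not copied from the PR.
func createRemoveFailedPodsArgs(
    includingInitContainers bool,
    reasons []string,
    exitCodes []int32,
    excludeOwnerKinds []string,
    minPodLifetimeSeconds *uint,
) RemoveFailedPodsArgs {
    return RemoveFailedPodsArgs{
        IncludingInitContainers: includingInitContainers,
        Reasons:                 reasons,
        ExitCodes:               exitCodes,
        ExcludeOwnerKinds:       excludeOwnerKinds,
        MinPodLifetimeSeconds:   minPodLifetimeSeconds,
    }
}
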
@@ -276,65 +357,21 @@ func TestRemoveFailedPods(t *testing.T) {
             }
             fakeClient := fake.NewSimpleClientset(objs...)

-            sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
-            podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
-
-            getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
-            if err != nil {
-                t.Errorf("Build get pods assigned to node function error: %v", err)
-            }
-
-            sharedInformerFactory.Start(ctx.Done())
-            sharedInformerFactory.WaitForCacheSync(ctx.Done())
-
-            eventRecorder := &events.FakeRecorder{}
-
-            podEvictor := evictions.NewPodEvictor(
-                fakeClient,
-                policyv1.SchemeGroupVersion.String(),
-                false,
-                nil,
-                nil,
-                tc.nodes,
-                false,
-                eventRecorder,
-            )
-
-            defaultevictorArgs := &defaultevictor.DefaultEvictorArgs{
-                EvictLocalStoragePods: false,
-                EvictSystemCriticalPods: false,
-                IgnorePvcPods: false,
-                EvictFailedBarePods: false,
-                NodeFit: tc.nodeFit,
-            }
-
-            evictorFilter, err := defaultevictor.New(
-                defaultevictorArgs,
-                &frameworkfake.HandleImpl{
-                    ClientsetImpl: fakeClient,
-                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
-                    SharedInformerFactoryImpl: sharedInformerFactory,
-                },
-            )
+            handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: tc.nodeFit}, nil)
             if err != nil {
-                t.Fatalf("Unable to initialize the plugin: %v", err)
+                t.Fatalf("Unable to initialize a framework handle: %v", err)
             }

             plugin, err := New(&RemoveFailedPodsArgs{
                 Reasons: tc.args.Reasons,
+                ExitCodes: tc.args.ExitCodes,
                 MinPodLifetimeSeconds: tc.args.MinPodLifetimeSeconds,
                 IncludingInitContainers: tc.args.IncludingInitContainers,
                 ExcludeOwnerKinds: tc.args.ExcludeOwnerKinds,
                 LabelSelector: tc.args.LabelSelector,
                 Namespaces: tc.args.Namespaces,
             },
-                &frameworkfake.HandleImpl{
-                    ClientsetImpl: fakeClient,
-                    PodEvictorImpl: podEvictor,
-                    EvictorFilterImpl: evictorFilter.(frameworktypes.EvictorPlugin),
-                    SharedInformerFactoryImpl: sharedInformerFactory,
-                    GetPodsAssignedToNodeFuncImpl: getPodsAssignedToNode,
-                },
+                handle,
             )
             if err != nil {
                 t.Fatalf("Unable to initialize the plugin: %v", err)
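
For orientation, the hunk above collapses the hand-built informer factory, event recorder, PodEvictor, and DefaultEvictor wiring into a single frameworktesting.InitFrameworkHandle call, using only the call shape visible in the diff. A minimal sketch of that call outside the table-driven test, assuming the import paths below and the hypothetical exampleHandleSetup helper (neither is confirmed by this diff):

package removefailedpods_test // hypothetical package name for the sketch

import (
    "context"
    "testing"

    "k8s.io/client-go/kubernetes/fake"

    "sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor" // assumed path
    frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing" // assumed path
)

func exampleHandleSetup(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    fakeClient := fake.NewSimpleClientset()

    // One call replaces the informer factory, PodEvictor, and DefaultEvictor
    // filter that the removed lines built by hand.
    handle, podEvictor, err := frameworktesting.InitFrameworkHandle(
        ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
    if err != nil {
        t.Fatalf("Unable to initialize a framework handle: %v", err)
    }
    _, _ = handle, podEvictor
}
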
@@ -25,10 +25,11 @@ import (
 type RemoveFailedPodsArgs struct {
     metav1.TypeMeta `json:",inline"`

-    Namespaces *api.Namespaces `json:"namespaces"`
-    LabelSelector *metav1.LabelSelector `json:"labelSelector"`
-    ExcludeOwnerKinds []string `json:"excludeOwnerKinds"`
-    MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds"`
-    Reasons []string `json:"reasons"`
-    IncludingInitContainers bool `json:"includingInitContainers"`
+    Namespaces *api.Namespaces `json:"namespaces,omitempty"`
+    LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
+    ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
+    MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
+    Reasons []string `json:"reasons,omitempty"`
+    ExitCodes []int32 `json:"exitCodes,omitempty"`
+    IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
 }
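
The struct change above introduces the ExitCodes field and adds omitempty to every tag. As a usage illustration only (the helper name and the concrete values are not from this diff), a caller could now target failed containers by exit code as well as by termination reason:

// Illustrative values only: 137 and 143 are common container exit codes
// (SIGKILL/SIGTERM), not values required by the plugin.
func exampleRemoveFailedPodsArgs() *RemoveFailedPodsArgs {
    oneHour := uint(3600)
    return &RemoveFailedPodsArgs{
        Reasons:                 []string{"CreateContainerConfigError"},
        ExitCodes:               []int32{137, 143},
        IncludingInitContainers: true,
        ExcludeOwnerKinds:       []string{"DaemonSet"},
        MinPodLifetimeSeconds:   &oneHour,
    }
}
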
Some files were not shown because too many files have changed in this diff.