mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
Compare commits
168 Commits
deschedule
...
deschedule
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4d78cd49a0 | ||
|
|
ce56624cea | ||
|
|
dd7b76f2c4 | ||
|
|
bc4f17194b | ||
|
|
ebaf155e23 | ||
|
|
781572fed5 | ||
|
|
e3503d22f4 | ||
|
|
564c2c29d8 | ||
|
|
bbb915e003 | ||
|
|
1db6b615d1 | ||
|
|
e9188852ef | ||
|
|
c36466bb1c | ||
|
|
841fd29282 | ||
|
|
79b2e04199 | ||
|
|
3a608a590a | ||
|
|
07b1c4e681 | ||
|
|
7d6f6fedec | ||
|
|
3033aec6a0 | ||
|
|
9eb582cd67 | ||
|
|
925b388702 | ||
|
|
e599018adb | ||
|
|
f9a3be8cde | ||
|
|
d47e077897 | ||
|
|
483c9c1499 | ||
|
|
d841a2d913 | ||
|
|
f7f86ed075 | ||
|
|
46f55b5221 | ||
|
|
fa9fa70ed7 | ||
|
|
43dc9a9616 | ||
|
|
f8c8d9a385 | ||
|
|
b5bb15ae10 | ||
|
|
94d1825c68 | ||
|
|
257bd55909 | ||
|
|
fd2febbfe1 | ||
|
|
63ecaec0ef | ||
|
|
4740bc3e27 | ||
|
|
8f521bb6f7 | ||
|
|
d641488ea1 | ||
|
|
e5c57a759b | ||
|
|
9c7e01de67 | ||
|
|
eb4c7d102f | ||
|
|
e5ea03ce75 | ||
|
|
2cce141feb | ||
|
|
f2211e1cef | ||
|
|
d9697591d5 | ||
|
|
419fe74702 | ||
|
|
7380aa6e0a | ||
|
|
b84b2623b9 | ||
|
|
d0548b75d7 | ||
|
|
6e9d8891c5 | ||
|
|
5cc9e68127 | ||
|
|
bf6a51f733 | ||
|
|
f15d8d0d54 | ||
|
|
a177744169 | ||
|
|
eadfe4a546 | ||
|
|
fbf11df729 | ||
|
|
e5ab156a99 | ||
|
|
6e714a2134 | ||
|
|
a01fa87de8 | ||
|
|
ba694cfac1 | ||
|
|
2570bedd52 | ||
|
|
89eab59d82 | ||
|
|
ace001c618 | ||
|
|
33894afe2b | ||
|
|
528aff2d42 | ||
|
|
a2b899aa15 | ||
|
|
a1ddb3f28f | ||
|
|
b63b09089e | ||
|
|
2d7528411a | ||
|
|
e06443ef40 | ||
|
|
9f918371a2 | ||
|
|
c8912acfb7 | ||
|
|
1974c12e0f | ||
|
|
b3aeca73db | ||
|
|
d34848086c | ||
|
|
9aa6d79c21 | ||
|
|
7a76d9f0d3 | ||
|
|
71746262b1 | ||
|
|
8b0ae7ce52 | ||
|
|
6691720da5 | ||
|
|
0a691debfb | ||
|
|
fbc875fac1 | ||
|
|
e466307d7c | ||
|
|
9fed73148c | ||
|
|
957c5bc8e0 | ||
|
|
5ce857b3fd | ||
|
|
1f4d20dd6b | ||
|
|
75d9800baf | ||
|
|
045fbb6a04 | ||
|
|
035849c721 | ||
|
|
a3ca3093e5 | ||
|
|
ee376e12ac | ||
|
|
4750dc19e6 | ||
|
|
6bf75bedef | ||
|
|
2b450c15b8 | ||
|
|
2a27d1be90 | ||
|
|
6e1832b1af | ||
|
|
a249c9baf0 | ||
|
|
5d4dc6604a | ||
|
|
96652f3c28 | ||
|
|
6e9622bf41 | ||
|
|
f66804396e | ||
|
|
3a5c651136 | ||
|
|
68207da9c8 | ||
|
|
35a7178df6 | ||
|
|
cca28f7bbe | ||
|
|
38b1f5c1a8 | ||
|
|
c2ed7eb575 | ||
|
|
603473839a | ||
|
|
17b90969cf | ||
|
|
2cce60dc2b | ||
|
|
b4b203cc60 | ||
|
|
59d1d5d1b9 | ||
|
|
98e6ed6587 | ||
|
|
4548723dea | ||
|
|
7542cac9d0 | ||
|
|
8871272d35 | ||
|
|
a31a3b5e85 | ||
|
|
9b9ae9a3be | ||
|
|
c22d773200 | ||
|
|
9ba0f9b410 | ||
|
|
95a631f6a5 | ||
|
|
89535b9b9b | ||
|
|
54d0a22ad1 | ||
|
|
87ba84b2ad | ||
|
|
b300faece0 | ||
|
|
04ebdbee32 | ||
|
|
bda785f7dc | ||
|
|
6ab73d6ac5 | ||
|
|
e283c31030 | ||
|
|
aed345994f | ||
|
|
be4abe1727 | ||
|
|
e14b86eb8c | ||
|
|
a4d6119bcd | ||
|
|
57bb31de78 | ||
|
|
581d997379 | ||
|
|
17d9b152a2 | ||
|
|
b935c7d82c | ||
|
|
b8e3c0bba3 | ||
|
|
5bf11813e6 | ||
|
|
d883c8a9e1 | ||
|
|
50dd3b8971 | ||
|
|
fd9f2b4614 | ||
|
|
655ab516c7 | ||
|
|
0d5301ead2 | ||
|
|
d97f1c9057 | ||
|
|
57a04aae9f | ||
|
|
f3abaf48ae | ||
|
|
88af72b907 | ||
|
|
fa3fb4e954 | ||
|
|
d5b609b34a | ||
|
|
9c6604fc51 | ||
|
|
1a49e116df | ||
|
|
4775db9e2f | ||
|
|
52d5d6398c | ||
|
|
6100c914b4 | ||
|
|
0b3c022c32 | ||
|
|
335c698b38 | ||
|
|
e39ae80628 | ||
|
|
3440abfa41 | ||
|
|
e6d0caa1bc | ||
|
|
e085610bfd | ||
|
|
03246d6843 | ||
|
|
5a201a32d9 | ||
|
|
fc484030b9 | ||
|
|
c02c889734 | ||
|
|
ca7afd60b9 | ||
|
|
7369b1291e |
22
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
22
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
## Description
|
||||
<!-- Please include a summary of the change and which issue is fixed -->
|
||||
|
||||
## Checklist
|
||||
Please ensure your pull request meets the following criteria before submitting
|
||||
for review, these items will be used by reviewers to assess the quality and
|
||||
completeness of your changes:
|
||||
|
||||
- [ ] **Code Readability**: Is the code easy to understand, well-structured, and consistent with project conventions?
|
||||
- [ ] **Naming Conventions**: Are variable, function, and struct names descriptive and consistent?
|
||||
- [ ] **Code Duplication**: Is there any repeated code that should be refactored?
|
||||
- [ ] **Function/Method Size**: Are functions/methods short and focused on a single task?
|
||||
- [ ] **Comments & Documentation**: Are comments clear, useful, and not excessive? Were comments updated where necessary?
|
||||
- [ ] **Error Handling**: Are errors handled appropriately?
|
||||
- [ ] **Testing**: Are there sufficient unit/integration tests?
|
||||
- [ ] **Performance**: Are there any obvious performance issues or unnecessary computations?
|
||||
- [ ] **Dependencies**: Are new dependencies justified?
|
||||
- [ ] **Logging & Monitoring**: Is logging used appropriately (not too verbose, not too silent)?
|
||||
- [ ] **Backward Compatibility**: Does this change break any existing functionality or APIs?
|
||||
- [ ] **Resource Management**: Are resources (files, connections, memory) managed and released properly?
|
||||
- [ ] **PR Description**: Is the PR description clear, providing enough context and explaining the motivation for the change?
|
||||
- [ ] **Documentation & Changelog**: Are README and docs updated if necessary?
|
||||
6
.github/workflows/manifests.yaml
vendored
6
.github/workflows/manifests.yaml
vendored
@@ -7,10 +7,11 @@ jobs:
|
||||
deploy:
|
||||
strategy:
|
||||
matrix:
|
||||
k8s-version: ["v1.32.0"]
|
||||
descheduler-version: ["v0.32.1"]
|
||||
k8s-version: ["v1.34.0"]
|
||||
descheduler-version: ["v0.34.0"]
|
||||
descheduler-api: ["v1alpha2"]
|
||||
manifest: ["deployment"]
|
||||
kind-version: ["v0.30.0"] # keep in sync with test/run-e2e-tests.sh
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
@@ -21,6 +22,7 @@ jobs:
|
||||
node_image: kindest/node:${{ matrix.k8s-version }}
|
||||
kubectl_version: ${{ matrix.k8s-version }}
|
||||
config: test/kind-config.yaml
|
||||
version: ${{ matrix.kind-version }}
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
FROM golang:1.23.3
|
||||
FROM golang:1.24.6
|
||||
|
||||
WORKDIR /go/src/sigs.k8s.io/descheduler
|
||||
COPY . .
|
||||
@@ -21,7 +21,7 @@ RUN VERSION=${VERSION} make build.$ARCH
|
||||
|
||||
FROM scratch
|
||||
|
||||
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
|
||||
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
|
||||
|
||||
LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
# limitations under the License.
|
||||
FROM scratch
|
||||
|
||||
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
|
||||
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
|
||||
|
||||
USER 1000
|
||||
|
||||
|
||||
2
Makefile
2
Makefile
@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64
|
||||
|
||||
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
|
||||
|
||||
GOLANGCI_VERSION := v1.62.2
|
||||
GOLANGCI_VERSION := v1.64.8
|
||||
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
|
||||
|
||||
GOFUMPT_VERSION := v0.7.0
|
||||
|
||||
3
OWNERS
3
OWNERS
@@ -4,6 +4,7 @@ approvers:
|
||||
- seanmalloy
|
||||
- a7i
|
||||
- knelasevero
|
||||
- ricardomaraschini
|
||||
reviewers:
|
||||
- damemi
|
||||
- seanmalloy
|
||||
@@ -13,6 +14,8 @@ reviewers:
|
||||
- janeliul
|
||||
- knelasevero
|
||||
- jklaw90
|
||||
- googs1025
|
||||
- ricardomaraschini
|
||||
emeritus_approvers:
|
||||
- aveshagarwal
|
||||
- k82cn
|
||||
|
||||
193
README.md
193
README.md
@@ -33,15 +33,16 @@ but relies on the default scheduler for that.
|
||||
## ⚠️ Documentation Versions by Release
|
||||
|
||||
If you are using a published release of Descheduler (such as
|
||||
`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
|
||||
`registry.k8s.io/descheduler/descheduler:v0.34.0`), follow the documentation in
|
||||
that version's release branch, as listed below:
|
||||
|
||||
|Descheduler Version|Docs link|
|
||||
|---|---|
|
||||
|v0.34.x|[`release-1.34`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.34/README.md)|
|
||||
|v0.33.x|[`release-1.33`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.33/README.md)|
|
||||
|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
|
||||
|v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
|
||||
|v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
|
||||
|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
|
||||
|
||||
The
|
||||
[`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
|
||||
@@ -93,17 +94,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
|
||||
|
||||
Run As A Job
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.32' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A CronJob
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.32' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A Deployment
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.32' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
|
||||
```
|
||||
|
||||
## User Guide
|
||||
@@ -118,32 +119,75 @@ The Descheduler Policy is configurable and includes default strategy plugins tha
|
||||
|
||||
These are top level keys in the Descheduler Policy that you can use to configure all evictions.
|
||||
|
||||
| Name |type| Default Value | Description |
|
||||
|------|----|---------------|-------------|
|
||||
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
|
||||
| `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
|
||||
| `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
|
||||
| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |
|
||||
| `metricsCollector` |`object`| `nil` | configures collection of metrics for actual resource utilization |
|
||||
| `metricsCollector.enabled` |`bool`| `false` | enables kubernetes [metrics server](https://kubernetes-sigs.github.io/metrics-server/) collection |
|
||||
| Name | type | Default Value | Description |
|
||||
|------------------------------------|----------|---------------|----------------------------------------------------------------------------------------------------------------------------|
|
||||
| `nodeSelector` | `string` | `nil` | Limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point. |
|
||||
| `maxNoOfPodsToEvictPerNode` | `int` | `nil` | Maximum number of pods evicted from each node (summed through all strategies). |
|
||||
| `maxNoOfPodsToEvictPerNamespace` | `int` | `nil` | Maximum number of pods evicted from each namespace (summed through all strategies). |
|
||||
| `maxNoOfPodsToEvictTotal` | `int` | `nil` | Maximum number of pods evicted per rescheduling cycle (summed through all strategies). |
|
||||
| `metricsCollector` (deprecated) | `object` | `nil` | Configures collection of metrics for actual resource utilization. |
|
||||
| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
|
||||
| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) |
|
||||
| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
|
||||
| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
|
||||
| `prometheus` |`object`| `nil` | Configures collection of Prometheus metrics for actual resource utilization |
|
||||
| `prometheus.url` |`string`| `nil` | Points to a Prometheus server url |
|
||||
| `prometheus.authToken` |`object`| `nil` | Sets Prometheus server authentication token. If not specified in cluster authentication token from the container's file system is read. |
|
||||
| `prometheus.authToken.secretReference` |`object`| `nil` | Read the authentication token from a kubernetes secret (the secret is expected to contain the token under `prometheusAuthToken` data key) |
|
||||
| `prometheus.authToken.secretReference.namespace` |`string`| `nil` | Authentication token kubernetes secret namespace (currently, the RBAC configuration permits retrieving secrets from the `kube-system` namespace. If the secret needs to be accessed from a different namespace, the existing RBAC rules must be explicitly extended.) |
|
||||
| `prometheus.authToken.secretReference.name` |`string`| `nil` | Authentication token kubernetes secret name |
|
||||
|
||||
The descheduler currently allows configuring collection of Kubernetes metrics through the `metricsProviders` field.
|
||||
The previous way of setting `metricsCollector` field is deprecated. There are currently two sources to configure:
|
||||
- `KubernetesMetrics`: enables metrics collection from Kubernetes Metrics server
|
||||
- `Prometheus`: enables metrics collection from Prometheus server
|
||||
|
||||
In general, each plugin can consume metrics from a different provider so multiple distinct providers can be configured in parallel.
|
||||
|
||||
|
||||
### Evictor Plugin configuration (Default Evictor)
|
||||
|
||||
The Default Evictor Plugin is used by default for filtering pods before processing them in a strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top level config.
|
||||
|
||||
| Name |type| Default Value | Description |
|
||||
|---------------------------|----|---------------|-----------------------------------------------------------------------------------------------------------------------------|
|
||||
| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
|
||||
| `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
|
||||
| `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
|
||||
| `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
|
||||
| `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
|
||||
| `labelSelector` |`metav1.LabelSelector`|| (see [label filtering](#label-filtering)) |
|
||||
| `priorityThreshold` |`priorityThreshold`|| (see [priority filtering](#priority-filtering)) |
|
||||
| `nodeFit` |`bool`|`false`| (see [node fit filtering](#node-fit-filtering)) |
|
||||
| `minReplicas` |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
|
||||
| `minPodAge` |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
|
||||
| `ignorePodsWithoutPDB` |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored |
|
||||
| Name | Type | Default Value | Description |
|
||||
|---------------------------|------------------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `nodeSelector` | `string` | `nil` | Limits the nodes that are processed. |
|
||||
| `evictLocalStoragePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithLocalStorage"` instead]**<br>Allows eviction of pods using local storage. |
|
||||
| `evictDaemonSetPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"DaemonSetPods"` instead]**<br>Allows eviction of DaemonSet managed Pods. |
|
||||
| `evictSystemCriticalPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"SystemCriticalPods"` instead]**<br>[Warning: Will evict Kubernetes system pods] Allows eviction of pods with any priority, including system-critical pods like kube-dns. |
|
||||
| `ignorePvcPods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithPVC"` instead]**<br>Sets whether PVC pods should be evicted or ignored. |
|
||||
| `evictFailedBarePods` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"FailedBarePods"` instead]**<br>Allows eviction of pods without owner references and in a failed phase. |
|
||||
| `ignorePodsWithoutPDB` | `bool` | `false` | **[Deprecated: Use `podProtections` with `"PodsWithoutPDB"` instead]**<br>Sets whether pods without PodDisruptionBudget should be evicted or ignored. |
|
||||
| `labelSelector` | `metav1.LabelSelector` | | (See [label filtering](#label-filtering)) |
|
||||
| `priorityThreshold` | `priorityThreshold` | | (See [priority filtering](#priority-filtering)) |
|
||||
| `nodeFit` | `bool` | `false` | (See [node fit filtering](#node-fit-filtering)) |
|
||||
| `minReplicas` | `uint` | `0` | Ignores eviction of pods where the owner (e.g., `ReplicaSet`) replicas are below this threshold. |
|
||||
| `minPodAge` | `metav1.Duration` | `0` | Ignores eviction of pods with a creation time within this threshold. |
|
||||
| `noEvictionPolicy` | `enum` | `` | sets whether a `descheduler.alpha.kubernetes.io/prefer-no-eviction` pod annotation is considered preferred or mandatory. Accepted values: "", "Preferred", "Mandatory". Defaults to "Preferred". |
|
||||
| `podProtections` | `PodProtections` | `{}` | Holds the list of enabled and disabled protection pod policies.<br>Users can selectively disable certain default protection rules or enable extra ones. See below for supported values. |
|
||||
|
||||
#### Supported Values for `podProtections.DefaultDisabled`
|
||||
|
||||
> Setting a value in `defaultDisabled` **disables the corresponding default protection rule**. This means the specified type of Pods will **no longer be protected** from eviction and may be evicted if they meet other criteria.
|
||||
|
||||
| Value | Meaning |
|
||||
|--------------------------|-------------------------------------------------------------------------|
|
||||
| `"PodsWithLocalStorage"` | Allow eviction of Pods using local storage. |
|
||||
| `"DaemonSetPods"` | Allow eviction of DaemonSet-managed Pods. |
|
||||
| `"SystemCriticalPods"` | Allow eviction of system-critical Pods. |
|
||||
| `"FailedBarePods"` | Allow eviction of failed bare Pods (without controllers). |
|
||||
|
||||
---
|
||||
|
||||
#### Supported Values for `podProtections.ExtraEnabled`
|
||||
|
||||
> Setting a value in `extraEnabled` **enables an additional protection rule**. This means the specified type of Pods will be **protected** from eviction.
|
||||
|
||||
| Value | Meaning |
|
||||
|----------------------------|------------------------------------------------------------------|
|
||||
| `"PodsWithPVC"` | Prevents eviction of Pods using Persistent Volume Claims (PVCs). |
|
||||
| `"PodsWithoutPDB"` | Prevents eviction of Pods without a PodDisruptionBudget (PDB). |
|
||||
| `"PodsWithResourceClaims"` | Prevents eviction of Pods using ResourceClaims. |
|
||||
|
||||
### Example policy
|
||||
|
||||
@@ -160,16 +204,31 @@ nodeSelector: "node=node1" # you don't need to set this, if not set all will be
|
||||
maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
|
||||
maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
|
||||
maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
|
||||
metricsCollector:
|
||||
enabled: true # you don't need to set this, metrics are not collected if not set
|
||||
gracePeriodSeconds: 60 # you don't need to set this, 0 if not set
|
||||
# you don't need to set this, metrics are not collected if not set
|
||||
metricsProviders:
|
||||
- source: Prometheus
|
||||
prometheus:
|
||||
url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
|
||||
authToken:
|
||||
secretReference:
|
||||
namespace: "kube-system"
|
||||
name: "authtoken"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
evictSystemCriticalPods: true
|
||||
evictFailedBarePods: true
|
||||
evictLocalStoragePods: true
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
#- "PodsWithLocalStorage"
|
||||
#- "SystemCriticalPods"
|
||||
#- "DaemonSetPods"
|
||||
#- "FailedBarePods"
|
||||
extraEnabled:
|
||||
#- "PodsWithPVC"
|
||||
#- "PodsWithoutPDB"
|
||||
#- "PodsWithResourceClaims"
|
||||
nodeFit: true
|
||||
minReplicas: 2
|
||||
plugins:
|
||||
@@ -285,9 +344,14 @@ A resource consumption above (resp. below) this window is considered as overutil
|
||||
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
|
||||
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
|
||||
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
|
||||
actual usage metrics. Metrics-based descheduling can be enabled by setting `metricsUtilization.metricsServer` field.
|
||||
In order to have the plugin consume the metrics the metric collector needs to be configured as well.
|
||||
See `metricsCollector` field at [Top Level configuration](#top-level-configuration) for available options.
|
||||
actual usage metrics. Metrics-based descheduling can be enabled by setting `metricsUtilization.metricsServer` field (deprecated)
|
||||
or `metricsUtilization.source` field to `KubernetesMetrics`.
|
||||
In order to have the plugin consume the metrics the metric provider needs to be configured as well.
|
||||
Alternatively, it is possible to create a prometheus client and configure a prometheus query to consume
|
||||
metrics outside of the kubernetes metrics server. The query is expected to return a vector of values for
|
||||
each node. The values are expected to be any real number within <0; 1> interval. During eviction only
|
||||
a single pod is evicted at most from each overutilized node. There's currently no support for evicting more.
|
||||
See `metricsProviders` field at [Top Level configuration](#top-level-configuration) for available options.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
@@ -297,9 +361,12 @@ See `metricsCollector` field at [Top Level configuration](#top-level-configurati
|
||||
|`thresholds`|map(string:int)|
|
||||
|`targetThresholds`|map(string:int)|
|
||||
|`numberOfNodes`|int|
|
||||
|`evictionLimits`|object|
|
||||
|`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|`metricsUtilization`|object|
|
||||
|`metricsUtilization.metricsServer`|bool|
|
||||
|`metricsUtilization.metricsServer` (deprecated)|bool|
|
||||
|`metricsUtilization.source`|string|
|
||||
|`metricsUtilization.prometheus.query`|string|
|
||||
|
||||
|
||||
**Example:**
|
||||
@@ -320,8 +387,12 @@ profiles:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
metricsUtilization:
|
||||
metricsServer: true
|
||||
# metricsUtilization:
|
||||
# source: Prometheus
|
||||
# prometheus:
|
||||
# query: instance:node_cpu:rate:sum
|
||||
evictionLimits:
|
||||
node: 5
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
@@ -337,10 +408,12 @@ and will not be used to compute node's usage if it's not specified in `threshold
|
||||
* The valid range of the resource's percentage value is \[0, 100\]
|
||||
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
|
||||
|
||||
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
|
||||
This parameter can be configured to activate the strategy only when the number of under utilized nodes
|
||||
There are two more parameters associated with the `LowNodeUtilization` strategy, called `numberOfNodes` and `evictionLimits`.
|
||||
The first parameter can be configured to activate the strategy only when the number of under utilized nodes
|
||||
are above the configured value. This could be helpful in large clusters where a few nodes could go
|
||||
under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
|
||||
The second parameter is useful when the number of evictions per plugin per descheduling cycle needs to be limited.
|
||||
The parameter currently enables limiting the number of evictions per node through the `node` field.
|
||||
|
||||
### HighNodeUtilization
|
||||
|
||||
@@ -366,6 +439,12 @@ strategy evicts pods from `underutilized nodes` (those with usage below `thresho
|
||||
so that they can be recreated in appropriately utilized nodes.
|
||||
The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.
|
||||
|
||||
To control pod eviction from underutilized nodes, use the `evictionModes`
|
||||
array. A lenient policy, which evicts pods regardless of their resource
|
||||
requests, is the default. To enable a stricter policy that only evicts pods
|
||||
with resource requests defined for the provided threshold resources, add the
|
||||
option `OnlyThresholdingResources` to the `evictionModes` configuration.
|
||||
|
||||
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
|
||||
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
|
||||
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
|
||||
@@ -378,8 +457,15 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
|
||||
|---|---|
|
||||
|`thresholds`|map(string:int)|
|
||||
|`numberOfNodes`|int|
|
||||
|`evictionModes`|list(string)|
|
||||
|`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
|
||||
**Supported Eviction Modes:**
|
||||
|
||||
|Name|Description|
|
||||
|---|---|
|
||||
|`OnlyThresholdingResources`|Evict only pods that have resource requests defined for the provided threshold resources.|
|
||||
|
||||
**Example:**
|
||||
|
||||
```yaml
|
||||
@@ -398,6 +484,8 @@ profiles:
|
||||
exclude:
|
||||
- "kube-system"
|
||||
- "namespace1"
|
||||
evictionModes:
|
||||
- "OnlyThresholdingResources"
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
@@ -672,7 +760,9 @@ profiles:
|
||||
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
|
||||
|
||||
You can also specify `states` parameter to **only** evict pods matching the following conditions:
|
||||
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Unknown`
|
||||
> The primary purpose for using states like `Succeeded` and `Failed` is releasing resources so that new pods can be rescheduled.
|
||||
> I.e., the main motivation is not for cleaning pods, rather to release resources.
|
||||
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`, `Succeeded`, `Failed`, `Unknown`
|
||||
- [Pod Reason](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) reasons of: `NodeAffinity`, `NodeLost`, `Shutdown`, `UnexpectedAdmissionError`
|
||||
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`, `ImagePullBackOff`, `CrashLoopBackOff`, `CreateContainerConfigError`, `ErrImagePull`, `ImagePullBackOff`, `CreateContainerError`, `InvalidImageName`
|
||||
|
||||
@@ -870,7 +960,7 @@ does not exist, descheduler won't create it and will throw an error.
|
||||
|
||||
### Label filtering
|
||||
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta)
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)
|
||||
to filter pods by their labels:
|
||||
|
||||
* `PodLifeTime`
|
||||
@@ -956,12 +1046,16 @@ never evicted because these pods won't be recreated. (Standalone pods in failed
|
||||
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
|
||||
* In `LowNodeUtilization` and `RemovePodsViolatingInterPodAntiAffinity`, pods are evicted by their priority from low to high, and if they have same priority,
|
||||
best effort pods are evicted before burstable and guaranteed pods.
|
||||
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
|
||||
* All types of pods with the `descheduler.alpha.kubernetes.io/evict` annotation are eligible for eviction. This
|
||||
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
|
||||
Users should know how and if the pod will be recreated.
|
||||
The annotation only affects internal descheduler checks.
|
||||
The anti-disruption protection provided by the [/eviction](https://kubernetes.io/docs/concepts/scheduling-eviction/api-eviction/)
|
||||
subresource is still respected.
|
||||
* Pods with the `descheduler.alpha.kubernetes.io/prefer-no-eviction` annotation voice their preference not to be evicted.
|
||||
Each plugin decides whether the annotation gets respected or not. When the `DefaultEvictor` plugin sets `noEvictionPolicy`
|
||||
to `Mandatory` all such pods are excluded from eviction. Needs to be used with caution as some plugins may enforce
|
||||
various policies that are expected to be always met.
|
||||
* Pods with a non-nil DeletionTimestamp are not evicted by default.
|
||||
|
||||
Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
|
||||
@@ -990,10 +1084,15 @@ To get best results from HA mode some additional configurations might require:
|
||||
|
||||
## Metrics
|
||||
|
||||
| name | type | description |
|
||||
|-------|-------|----------------|
|
||||
| build_info | gauge | constant 1 |
|
||||
| pods_evicted | CounterVec | total number of pods evicted |
|
||||
| name | type | description |
|
||||
|---------------------------------------|--------------|-----------------------------------------------------------------------------------|
|
||||
| build_info | gauge | constant 1 |
|
||||
| pods_evicted | CounterVec | total number of pods evicted, is deprecated in version v0.34.0 |
|
||||
| pods_evicted_total | CounterVec | total number of pods evicted |
|
||||
| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count), is deprecated in version v0.34.0 |
|
||||
| loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count) |
|
||||
| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each stragtegy of descheduling operation (support _bucket, _sum, _count), is deprecated in version v0.34.0 |
|
||||
| strategy_duration_seconds | HistogramVec | time taken to complete each stragtegy of descheduling operation (support _bucket, _sum, _count) |
|
||||
|
||||
The metrics are served through https://localhost:10258/metrics by default.
|
||||
The address and port can be changed by setting `--binding-address` and `--secure-port` flags.
|
||||
@@ -1009,6 +1108,8 @@ packages that it is compiled with.
|
||||
|
||||
| Descheduler | Supported Kubernetes Version |
|
||||
|-------------|------------------------------|
|
||||
| v0.34 | v1.34 |
|
||||
| v0.33 | v1.33 |
|
||||
| v0.32 | v1.32 |
|
||||
| v0.31 | v1.31 |
|
||||
| v0.30 | v1.30 |
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: v1
|
||||
name: descheduler
|
||||
version: 0.32.1
|
||||
appVersion: 0.32.1
|
||||
version: 0.34.0
|
||||
appVersion: 0.34.0
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||
keywords:
|
||||
- kubernetes
|
||||
@@ -13,4 +13,4 @@ sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
maintainers:
|
||||
- name: Kubernetes SIG Scheduling
|
||||
email: kubernetes-sig-scheduling@googlegroups.com
|
||||
email: sig-scheduling@kubernetes.io
|
||||
|
||||
@@ -11,7 +11,7 @@ helm install my-release --namespace kube-system descheduler/descheduler
|
||||
|
||||
## Introduction
|
||||
|
||||
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview what changes descheduler would make without actually going forward with the changes, you can install descheduler in dry run mode by providing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -70,6 +70,10 @@ The following table lists the configurable parameters of the _descheduler_ chart
|
||||
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
|
||||
| `cronJobAnnotations` | Annotations to add to the descheduler CronJob | `{}` |
|
||||
| `cronJobLabels` | Labels to add to the descheduler CronJob | `{}` |
|
||||
| `jobAnnotations` | Annotations to add to the descheduler Job resources (created by CronJob) | `{}` |
|
||||
| `jobLabels` | Labels to add to the descheduler Job resources (created by CronJob) | `{}` |
|
||||
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
|
||||
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
|
||||
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
|
||||
@@ -10,3 +10,13 @@ WARNING: You enabled DryRun mode, you can't use Leader Election.
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Values.deschedulerPolicy }}
|
||||
A DeschedulerPolicy has been applied for you. You can view the policy with:
|
||||
|
||||
kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml
|
||||
|
||||
If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
|
||||
To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.
|
||||
|
||||
deschedulerPolicy: {}
|
||||
{{- end }}
|
||||
|
||||
@@ -24,6 +24,9 @@ rules:
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
@@ -33,4 +36,13 @@ rules:
|
||||
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
|
||||
verbs: ["get", "patch", "delete"]
|
||||
{{- end }}
|
||||
{{- if and .Values.deschedulerPolicy }}
|
||||
{{- range .Values.deschedulerPolicy.metricsProviders }}
|
||||
{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list"]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
@@ -10,5 +10,5 @@ data:
|
||||
policy.yaml: |
|
||||
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
|
||||
kind: "DeschedulerPolicy"
|
||||
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
|
||||
{{ tpl (toYaml .Values.deschedulerPolicy) . | trim | indent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -4,8 +4,15 @@ kind: CronJob
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
{{- if .Values.cronJobAnnotations }}
|
||||
annotations:
|
||||
{{- .Values.cronJobAnnotations | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.cronJobLabels }}
|
||||
{{- .Values.cronJobLabels | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
schedule: {{ .Values.schedule | quote }}
|
||||
{{- if .Values.suspend }}
|
||||
@@ -25,10 +32,24 @@ spec:
|
||||
timeZone: {{ .Values.timeZone }}
|
||||
{{- end }}
|
||||
jobTemplate:
|
||||
{{- if or .Values.jobAnnotations .Values.jobLabels }}
|
||||
metadata:
|
||||
{{- if .Values.jobAnnotations }}
|
||||
annotations:
|
||||
{{- .Values.jobAnnotations | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.jobLabels }}
|
||||
labels:
|
||||
{{- .Values.jobLabels | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ttlSecondsAfterFinished }}
|
||||
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
|
||||
{{- end }}
|
||||
{{- if .Values.activeDeadlineSeconds }}
|
||||
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
@@ -67,6 +88,9 @@ spec:
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
restartPolicy: "Never"
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
@@ -100,6 +124,9 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 16 }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 12 }}
|
||||
@@ -108,4 +135,7 @@ spec:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumes | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -6,6 +6,9 @@ metadata:
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.annotations }}
|
||||
annotations: {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if gt (.Values.replicas | int) 1 }}
|
||||
{{- if not .Values.leaderElection.enabled }}
|
||||
@@ -39,6 +42,9 @@ spec:
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
@@ -59,7 +65,9 @@ spec:
|
||||
- {{ printf "--%s" $key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
{{- include "descheduler.leaderElection" . | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
{{- toYaml .Values.ports | nindent 12 }}
|
||||
livenessProbe:
|
||||
@@ -73,6 +81,9 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
@@ -81,6 +92,9 @@ spec:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumes | nindent 8}}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
{{- if kindIs "bool" .Values.serviceAccount.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
metadata:
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
|
||||
109
charts/descheduler/tests/cronjob_annotations_test.yaml
Normal file
109
charts/descheduler/tests/cronjob_annotations_test.yaml
Normal file
@@ -0,0 +1,109 @@
|
||||
suite: Test Descheduler CronJob and Job Annotations and Labels
|
||||
|
||||
templates:
|
||||
- "*.yaml"
|
||||
|
||||
release:
|
||||
name: descheduler
|
||||
|
||||
set:
|
||||
kind: CronJob
|
||||
|
||||
tests:
|
||||
- it: adds cronJob and job annotations and labels when set
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
cronJobAnnotations:
|
||||
monitoring.company.com/scrape: "true"
|
||||
description: "test cronjob"
|
||||
cronJobLabels:
|
||||
environment: "test"
|
||||
team: "platform"
|
||||
jobAnnotations:
|
||||
sidecar.istio.io/inject: "false"
|
||||
job.company.com/retry-limit: "3"
|
||||
jobLabels:
|
||||
job-type: "maintenance"
|
||||
priority: "high"
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations["monitoring.company.com/scrape"]
|
||||
value: "true"
|
||||
- equal:
|
||||
path: metadata.annotations.description
|
||||
value: "test cronjob"
|
||||
- equal:
|
||||
path: metadata.labels.environment
|
||||
value: "test"
|
||||
- equal:
|
||||
path: metadata.labels.team
|
||||
value: "platform"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations["sidecar.istio.io/inject"]
|
||||
value: "false"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations["job.company.com/retry-limit"]
|
||||
value: "3"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.job-type
|
||||
value: "maintenance"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.priority
|
||||
value: "high"
|
||||
|
||||
- it: does not add cronJob and job metadata when not set
|
||||
template: templates/cronjob.yaml
|
||||
asserts:
|
||||
- isNull:
|
||||
path: metadata.annotations
|
||||
- isNotNull:
|
||||
path: metadata.labels
|
||||
- equal:
|
||||
path: metadata.labels["app.kubernetes.io/name"]
|
||||
value: descheduler
|
||||
- isNull:
|
||||
path: spec.jobTemplate.metadata
|
||||
|
||||
- it: does not add job metadata when job annotations and labels are empty
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
jobAnnotations: {}
|
||||
jobLabels: {}
|
||||
asserts:
|
||||
- isNull:
|
||||
path: spec.jobTemplate.metadata
|
||||
|
||||
- it: works with all annotation and label types together
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
cronJobAnnotations:
|
||||
cron-annotation: "cron-value"
|
||||
cronJobLabels:
|
||||
cron-label: "cron-value"
|
||||
jobAnnotations:
|
||||
job-annotation: "job-value"
|
||||
jobLabels:
|
||||
job-label: "job-value"
|
||||
podAnnotations:
|
||||
pod-annotation: "pod-value"
|
||||
podLabels:
|
||||
pod-label: "pod-value"
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations.cron-annotation
|
||||
value: "cron-value"
|
||||
- equal:
|
||||
path: metadata.labels.cron-label
|
||||
value: "cron-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations.job-annotation
|
||||
value: "job-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.job-label
|
||||
value: "job-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.spec.template.metadata.annotations.pod-annotation
|
||||
value: "pod-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.spec.template.metadata.labels.pod-label
|
||||
value: "pod-value"
|
||||
@@ -55,7 +55,8 @@ suspend: false
|
||||
# startingDeadlineSeconds: 200
|
||||
# successfulJobsHistoryLimit: 3
|
||||
# failedJobsHistoryLimit: 1
|
||||
# ttlSecondsAfterFinished 600
|
||||
# ttlSecondsAfterFinished: 600
|
||||
# activeDeadlineSeconds: 60 # Make sure this value is SHORTER than the cron interval.
|
||||
# timeZone: Etc/UTC
|
||||
|
||||
# Required when running as a Deployment
|
||||
@@ -89,16 +90,12 @@ cmdOptions:
|
||||
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
|
||||
|
||||
# deschedulerPolicy contains the policies the descheduler will execute.
|
||||
# To use policies stored in an existing configMap use:
|
||||
# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
|
||||
# deschedulerPolicy: {}
|
||||
deschedulerPolicy:
|
||||
# nodeSelector: "key1=value1,key2=value2"
|
||||
# maxNoOfPodsToEvictPerNode: 10
|
||||
# maxNoOfPodsToEvictPerNamespace: 10
|
||||
# ignorePvcPods: true
|
||||
# evictLocalStoragePods: true
|
||||
# evictDaemonSetPods: true
|
||||
# metricsProviders:
|
||||
# - source: KubernetesMetrics
|
||||
# tracing:
|
||||
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
|
||||
# transportCert: ""
|
||||
@@ -111,8 +108,11 @@ deschedulerPolicy:
|
||||
pluginConfig:
|
||||
- name: DefaultEvictor
|
||||
args:
|
||||
ignorePvcPods: true
|
||||
evictLocalStoragePods: true
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- name: RemoveDuplicates
|
||||
- name: RemovePodsHavingTooManyRestarts
|
||||
args:
|
||||
@@ -197,6 +197,25 @@ serviceAccount:
|
||||
name:
|
||||
# Specifies custom annotations for the serviceAccount
|
||||
annotations: {}
|
||||
# Opt out of API credential automounting
|
||||
#
|
||||
# automountServiceAccountToken Default is not set
|
||||
# automountServiceAccountToken: true
|
||||
|
||||
# Mount the ServiceAccountToken in the Pod of a CronJob or Deployment
|
||||
# Default is not set - but only implied by the ServiceAccount
|
||||
# automountServiceAccountToken: true
|
||||
|
||||
# Annotations that'll be applied to deployment
|
||||
deploymentAnnotations: {}
|
||||
|
||||
cronJobAnnotations: {}
|
||||
|
||||
cronJobLabels: {}
|
||||
|
||||
jobAnnotations: {}
|
||||
|
||||
jobLabels: {}
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
@@ -210,8 +229,9 @@ livenessProbe:
|
||||
path: /healthz
|
||||
port: 10258
|
||||
scheme: HTTPS
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 20
|
||||
timeoutSeconds: 5
|
||||
|
||||
service:
|
||||
enabled: false
|
||||
@@ -248,3 +268,30 @@ serviceMonitor:
|
||||
# targetLabel: nodename
|
||||
# replacement: $1
|
||||
# action: replace
|
||||
|
||||
## Additional Volume mounts when automountServiceAccountToken is false
|
||||
# extraServiceAccountVolumeMounts:
|
||||
# - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
|
||||
# name: kube-api-access
|
||||
# readOnly: true
|
||||
|
||||
## Additional Volumes when automountServiceAccountToken is false
|
||||
# extraServiceAccountVolumes:
|
||||
# - name: kube-api-access
|
||||
# projected:
|
||||
# defaultMode: 0444
|
||||
# sources:
|
||||
# - configMap:
|
||||
# items:
|
||||
# - key: ca.crt
|
||||
# path: ca.crt
|
||||
# name: kube-root-ca.crt
|
||||
# - downwardAPI:
|
||||
# items:
|
||||
# - fieldRef:
|
||||
# apiVersion: v1
|
||||
# fieldPath: metadata.namespace
|
||||
# path: namespace
|
||||
# - serviceAccountToken:
|
||||
# expirationSeconds: 3600
|
||||
# path: token
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -54,6 +55,7 @@ type DeschedulerServer struct {
|
||||
Client clientset.Interface
|
||||
EventClient clientset.Interface
|
||||
MetricsClient metricsclient.Interface
|
||||
PrometheusClient promapi.Client
|
||||
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
|
||||
SecureServingInfo *apiserver.SecureServingInfo
|
||||
DisableMetrics bool
|
||||
@@ -142,8 +144,10 @@ func (rs *DeschedulerServer) Apply() error {
|
||||
return err
|
||||
}
|
||||
|
||||
secureServing.DisableHTTP2 = !rs.EnableHTTP2
|
||||
rs.SecureServingInfo = secureServing
|
||||
if secureServing != nil {
|
||||
secureServing.DisableHTTP2 = !rs.EnableHTTP2
|
||||
rs.SecureServingInfo = secureServing
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
"io"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
@@ -97,17 +98,28 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
return err
|
||||
var stoppedCh <-chan struct{}
|
||||
var err error
|
||||
if rs.SecureServingInfo != nil {
|
||||
stoppedCh, _, err = rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "failed to create tracer provider")
|
||||
}
|
||||
defer tracing.Shutdown(ctx)
|
||||
defer func() {
|
||||
// we give the tracing.Shutdown() its own context as the
|
||||
// original context may have been cancelled already. we
|
||||
// have arbitrarily chosen the timeout duration.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
tracing.Shutdown(ctx)
|
||||
}()
|
||||
|
||||
// increase the fake watch channel so the dry-run mode can be run
|
||||
// over a cluster with thousands of pods
|
||||
@@ -118,8 +130,10 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
|
||||
}
|
||||
|
||||
done()
|
||||
// wait for metrics server to close
|
||||
<-stoppedCh
|
||||
if stoppedCh != nil {
|
||||
// wait for metrics server to close
|
||||
<-stoppedCh
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ When the above pre-release steps are complete and the release is ready to be cut
|
||||
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
|
||||
4. Cut release branch from `master`, eg `release-1.24`
|
||||
5. Publish release using Github's release process from the git tag you created
|
||||
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
6. Email `sig-scheduling@kubernetes.io` to announce the release
|
||||
|
||||
**Patch release**
|
||||
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
|
||||
@@ -34,7 +34,7 @@ When the above pre-release steps are complete and the release is ready to be cut
|
||||
3. Merge Helm chart version update to release branch
|
||||
4. Perform the image promotion process for the patch version
|
||||
5. Publish release using Github's release process from the git tag you created
|
||||
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
6. Email `sig-scheduling@kubernetes.io` to announce the release
|
||||
|
||||
### Flowchart
|
||||
|
||||
|
||||
@@ -4,7 +4,8 @@ Starting with descheduler release v0.10.0 container images are available in the
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-------------------------------------------------|-------------------------|
|
||||
v0.32.1 | registry.k8s.io/descheduler/descheduler:v0.32.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.34.0 | registry.k8s.io/descheduler/descheduler:v0.34.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
|
||||
141
go.mod
141
go.mod
@@ -1,53 +1,60 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.23.3
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.3
|
||||
|
||||
godebug default=go1.24
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.opentelemetry.io/otel v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
|
||||
go.opentelemetry.io/otel/sdk v1.28.0
|
||||
go.opentelemetry.io/otel/trace v1.28.0
|
||||
google.golang.org/grpc v1.65.0
|
||||
k8s.io/api v0.32.0
|
||||
k8s.io/apimachinery v0.32.0
|
||||
k8s.io/apiserver v0.32.0
|
||||
k8s.io/client-go v0.32.0
|
||||
k8s.io/code-generator v0.32.0
|
||||
k8s.io/component-base v0.32.0
|
||||
k8s.io/component-helpers v0.32.0
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/spf13/cobra v1.10.0
|
||||
github.com/spf13/pflag v1.0.9
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/apiserver v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/code-generator v0.34.0
|
||||
k8s.io/component-base v0.34.0
|
||||
k8s.io/component-helpers v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/metrics v0.32.0
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758
|
||||
k8s.io/metrics v0.34.0
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
)
|
||||
|
||||
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.18.0 // indirect
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
@@ -58,74 +65,74 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.2.1 // indirect
|
||||
github.com/golang/glog v1.2.4 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/cel-go v0.22.0 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.26.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/openshift/custom-resource-status v1.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
|
||||
google.golang.org/protobuf v1.35.1 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
|
||||
k8s.io/kms v0.32.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kms v0.34.0 // indirect
|
||||
k8s.io/kube-openapi v0.30.0 // indirect
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
||||
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
|
||||
|
||||
replace golang.org/x/net => golang.org/x/net v0.33.0
|
||||
|
||||
replace golang.org/x/crypto => golang.org/x/crypto v0.31.0
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
|
||||
406
go.sum
406
go.sum
@@ -1,23 +1,20 @@
|
||||
cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
|
||||
cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
|
||||
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
|
||||
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
@@ -36,8 +33,8 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -49,9 +46,9 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
@@ -59,10 +56,10 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
|
||||
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
@@ -108,11 +105,11 @@ github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/K
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
|
||||
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
|
||||
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -130,12 +127,12 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
|
||||
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
|
||||
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -143,14 +140,14 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -158,30 +155,35 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
|
||||
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
@@ -191,6 +193,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
@@ -204,11 +208,13 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
@@ -236,6 +242,7 @@ github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7
|
||||
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
|
||||
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
|
||||
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
@@ -264,22 +271,25 @@ github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7y
|
||||
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
|
||||
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
|
||||
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
@@ -287,16 +297,20 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/cobra v1.10.0 h1:a5/WeUlSDCvV5a45ljW2ZFtV0bTDpkfSAj3uqB6Sc+0=
|
||||
github.com/spf13/cobra v1.10.0/go.mod h1:9dhySC7dnTtEiqzmqfkLj47BslqLCUPMXjG2lj/NgoE=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.8/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -305,8 +319,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
@@ -318,49 +333,72 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
|
||||
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
|
||||
go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
|
||||
go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
|
||||
go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
|
||||
go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
|
||||
go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
|
||||
go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
|
||||
go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
|
||||
go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c=
|
||||
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
|
||||
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
|
||||
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
|
||||
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
|
||||
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
|
||||
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
@@ -382,13 +420,54 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
|
||||
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -402,18 +481,25 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
|
||||
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -422,6 +508,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -435,19 +522,43 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
|
||||
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
|
||||
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
@@ -459,10 +570,12 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -488,7 +601,9 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
|
||||
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -501,17 +616,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
|
||||
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
|
||||
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -523,11 +636,11 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -550,53 +663,50 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
|
||||
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
|
||||
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
|
||||
k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
|
||||
k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
|
||||
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
|
||||
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
|
||||
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
|
||||
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs=
|
||||
k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag=
|
||||
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
|
||||
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
|
||||
k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
|
||||
k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg=
|
||||
k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ=
|
||||
k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo=
|
||||
k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY=
|
||||
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU=
|
||||
k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s=
|
||||
k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
|
||||
k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
|
||||
k8s.io/component-helpers v0.32.0 h1:pQEEBmRt3pDJJX98cQvZshDgJFeKRM4YtYkMmfOlczw=
|
||||
k8s.io/component-helpers v0.32.0/go.mod h1:9RuClQatbClcokXOcDWSzFKQm1huIf0FzQlPRpizlMc=
|
||||
k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees=
|
||||
k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0=
|
||||
k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8=
|
||||
k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg=
|
||||
k8s.io/component-helpers v0.34.0 h1:5T7P9XGMoUy1JDNKzHf0p/upYbeUf8ZaSf9jbx0QlIo=
|
||||
k8s.io/component-helpers v0.34.0/go.mod h1:kaOyl5tdtnymriYcVZg4uwDBe2d1wlIpXyDkt6sVnt4=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
|
||||
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.32.0 h1:jwOfunHIrcdYl5FRcA+uUKKtg6qiqoPCwmS2T3XTYL4=
|
||||
k8s.io/kms v0.32.0/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
|
||||
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
|
||||
k8s.io/metrics v0.32.0 h1:70qJ3ZS/9DrtH0UA0NVBI6gW2ip2GAn9e7NtoKERpns=
|
||||
k8s.io/metrics v0.32.0/go.mod h1:skdg9pDjVjCPIQqmc5rBzDL4noY64ORhKu9KCPv1+QI=
|
||||
k8s.io/kms v0.34.0 h1:u+/rcxQ3Jr7gC9AY5nXuEnBcGEB7ZOIJ9cdLdyHyEjQ=
|
||||
k8s.io/kms v0.34.0/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/metrics v0.34.0 h1:nYSfG2+tnL6/MRC2I+sGHjtNEGoEoM/KktgGOoQFwws=
|
||||
k8s.io/metrics v0.34.0/go.mod h1:KCuXmotE0v4AvoARKUP8NC4lUnbK/Du1mluGdor5h4M=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
|
||||
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
|
||||
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
|
||||
@@ -605,19 +715,23 @@ kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPx
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
|
||||
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.2.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
go::verify_version() {
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.21|go1.22|go1.23') ]]; then
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -36,6 +36,15 @@ rules:
|
||||
resources: ["nodes", "pods"]
|
||||
verbs: ["get", "list"]
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: descheduler-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
@@ -54,3 +63,16 @@ subjects:
|
||||
- name: descheduler-sa
|
||||
kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: descheduler-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: descheduler-role
|
||||
subjects:
|
||||
- name: descheduler-sa
|
||||
kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.32.1
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.34.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
serviceAccountName: descheduler-sa
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.32.1
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.34.0
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
|
||||
@@ -14,7 +14,7 @@ spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.32.1
|
||||
image: registry.k8s.io/descheduler/descheduler:v0.34.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
|
||||
@@ -31,10 +31,18 @@ const (
|
||||
|
||||
var (
|
||||
PodsEvicted = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "pods_evicted",
|
||||
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
DeprecatedVersion: "0.34.0",
|
||||
}, []string{"result", "strategy", "profile", "namespace", "node"})
|
||||
PodsEvictedTotal = metrics.NewCounterVec(
|
||||
&metrics.CounterOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "pods_evicted",
|
||||
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
|
||||
Name: "pods_evicted_total",
|
||||
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
}, []string{"result", "strategy", "profile", "namespace", "node"})
|
||||
|
||||
@@ -49,18 +57,36 @@ var (
|
||||
)
|
||||
|
||||
DeschedulerLoopDuration = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "descheduler_loop_duration_seconds",
|
||||
Help: "Time taken to complete a full descheduling cycle",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
DeprecatedVersion: "0.34.0",
|
||||
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
|
||||
}, []string{})
|
||||
LoopDuration = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "descheduler_loop_duration_seconds",
|
||||
Name: "loop_duration_seconds",
|
||||
Help: "Time taken to complete a full descheduling cycle",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
|
||||
}, []string{})
|
||||
|
||||
DeschedulerStrategyDuration = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "descheduler_strategy_duration_seconds",
|
||||
Help: "Time taken to complete Each strategy of the descheduling operation",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
DeprecatedVersion: "0.34.0",
|
||||
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
|
||||
}, []string{"strategy", "profile"})
|
||||
StrategyDuration = metrics.NewHistogramVec(
|
||||
&metrics.HistogramOpts{
|
||||
Subsystem: DeschedulerSubsystem,
|
||||
Name: "descheduler_strategy_duration_seconds",
|
||||
Name: "strategy_duration_seconds",
|
||||
Help: "Time taken to complete Each strategy of the descheduling operation",
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
|
||||
@@ -68,9 +94,12 @@ var (
|
||||
|
||||
metricsList = []metrics.Registerable{
|
||||
PodsEvicted,
|
||||
PodsEvictedTotal,
|
||||
buildInfo,
|
||||
DeschedulerLoopDuration,
|
||||
DeschedulerStrategyDuration,
|
||||
LoopDuration,
|
||||
StrategyDuration,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ package api
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
@@ -47,7 +48,17 @@ type DeschedulerPolicy struct {
|
||||
EvictionFailureEventNotification *bool
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
MetricsCollector MetricsCollector
|
||||
// Deprecated. Use MetricsProviders field instead.
|
||||
MetricsCollector *MetricsCollector
|
||||
|
||||
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
|
||||
MetricsProviders []MetricsProvider
|
||||
|
||||
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
|
||||
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
|
||||
// specified type will be used.
|
||||
// Defaults to a per object value if not specified. zero means delete immediately.
|
||||
GracePeriodSeconds *int64
|
||||
}
|
||||
|
||||
// Namespaces carries a list of included/excluded namespaces
|
||||
@@ -57,6 +68,12 @@ type Namespaces struct {
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
|
||||
// EvictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
|
||||
type EvictionLimits struct {
|
||||
// node restricts the maximum number of evictions per node
|
||||
Node *uint `json:"node,omitempty"`
|
||||
}
|
||||
|
||||
type (
|
||||
Percentage float64
|
||||
ResourceThresholds map[v1.ResourceName]Percentage
|
||||
@@ -92,9 +109,54 @@ type PluginSet struct {
|
||||
Disabled []string
|
||||
}
|
||||
|
||||
type MetricsSource string
|
||||
|
||||
const (
|
||||
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
KubernetesMetrics MetricsSource = "KubernetesMetrics"
|
||||
|
||||
// KubernetesMetrics enables metrics from a Prometheus metrics server.
|
||||
PrometheusMetrics MetricsSource = "Prometheus"
|
||||
)
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
type MetricsCollector struct {
|
||||
// Enabled metrics collection from kubernetes metrics.
|
||||
// Later, the collection can be extended to other providers.
|
||||
// Enabled metrics collection from Kubernetes metrics.
|
||||
// Deprecated. Use MetricsProvider.Source field instead.
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
|
||||
type MetricsProvider struct {
|
||||
// Source enables metrics from Kubernetes metrics server.
|
||||
Source MetricsSource
|
||||
|
||||
// Prometheus enables metrics collection through Prometheus
|
||||
Prometheus *Prometheus
|
||||
}
|
||||
|
||||
// ReferencedResourceList is an adaption of v1.ResourceList with resources as references
|
||||
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
|
||||
|
||||
type Prometheus struct {
|
||||
URL string
|
||||
// authToken used for authentication with the prometheus server.
|
||||
// If not set the in cluster authentication token for the descheduler service
|
||||
// account is read from the container's file system.
|
||||
AuthToken *AuthToken
|
||||
}
|
||||
|
||||
type AuthToken struct {
|
||||
// secretReference references an authentication token.
|
||||
// secrets are expected to be created under the descheduler's namespace.
|
||||
SecretReference *SecretReference
|
||||
}
|
||||
|
||||
// SecretReference holds a reference to a Secret
|
||||
type SecretReference struct {
|
||||
// namespace is the namespace of the secret.
|
||||
Namespace string
|
||||
// name is the name of the secret.
|
||||
Name string
|
||||
}
|
||||
|
||||
@@ -43,10 +43,20 @@ type DeschedulerPolicy struct {
|
||||
|
||||
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
|
||||
// Default is false.
|
||||
EvictionFailureEventNotification *bool
|
||||
EvictionFailureEventNotification *bool `json:"evictionFailureEventNotification,omitempty"`
|
||||
|
||||
// MetricsCollector configures collection of metrics for actual resource utilization
|
||||
MetricsCollector MetricsCollector `json:"metricsCollector,omitempty"`
|
||||
// Deprecated. Use MetricsProviders field instead.
|
||||
MetricsCollector *MetricsCollector `json:"metricsCollector,omitempty"`
|
||||
|
||||
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
|
||||
MetricsProviders []MetricsProvider `json:"metricsProviders,omitempty"`
|
||||
|
||||
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
|
||||
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
|
||||
// specified type will be used.
|
||||
// Defaults to a per object value if not specified. zero means delete immediately.
|
||||
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
|
||||
}
|
||||
|
||||
type DeschedulerProfile struct {
|
||||
@@ -74,9 +84,51 @@ type PluginSet struct {
|
||||
Disabled []string `json:"disabled"`
|
||||
}
|
||||
|
||||
type MetricsSource string
|
||||
|
||||
const (
|
||||
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
KubernetesMetrics MetricsSource = "KubernetesMetrics"
|
||||
|
||||
// KubernetesMetrics enables metrics from a Prometheus metrics server.
|
||||
PrometheusMetrics MetricsSource = "Prometheus"
|
||||
)
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
type MetricsCollector struct {
|
||||
// Enabled metrics collection from kubernetes metrics.
|
||||
// Later, the collection can be extended to other providers.
|
||||
// Enabled metrics collection from Kubernetes metrics server.
|
||||
// Deprecated. Use MetricsProvider.Source field instead.
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
}
|
||||
|
||||
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
|
||||
type MetricsProvider struct {
|
||||
// Source enables metrics from Kubernetes metrics server.
|
||||
Source MetricsSource `json:"source,omitempty"`
|
||||
|
||||
// Prometheus enables metrics collection through Prometheus
|
||||
Prometheus *Prometheus `json:"prometheus,omitempty"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
// authToken used for authentication with the prometheus server.
|
||||
// If not set the in cluster authentication token for the descheduler service
|
||||
// account is read from the container's file system.
|
||||
AuthToken *AuthToken `json:"authToken,omitempty"`
|
||||
}
|
||||
|
||||
type AuthToken struct {
|
||||
// secretReference references an authentication token.
|
||||
// secrets are expected to be created under the descheduler's namespace.
|
||||
SecretReference *SecretReference `json:"secretReference,omitempty"`
|
||||
}
|
||||
|
||||
// SecretReference holds a reference to a Secret
|
||||
type SecretReference struct {
|
||||
// namespace is the namespace of the secret.
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
// name is the name of the secret.
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
|
||||
138
pkg/api/v1alpha2/zz_generated.conversion.go
generated
138
pkg/api/v1alpha2/zz_generated.conversion.go
generated
@@ -36,6 +36,16 @@ func init() {
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*AuthToken)(nil), (*api.AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_AuthToken_To_api_AuthToken(a.(*AuthToken), b.(*api.AuthToken), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.AuthToken)(nil), (*AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_AuthToken_To_v1alpha2_AuthToken(a.(*api.AuthToken), b.(*AuthToken), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
|
||||
}); err != nil {
|
||||
@@ -56,6 +66,16 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*MetricsProvider)(nil), (*api.MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(a.(*MetricsProvider), b.(*api.MetricsProvider), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.MetricsProvider)(nil), (*MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(a.(*api.MetricsProvider), b.(*MetricsProvider), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
|
||||
}); err != nil {
|
||||
@@ -81,6 +101,26 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*Prometheus)(nil), (*api.Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_Prometheus_To_api_Prometheus(a.(*Prometheus), b.(*api.Prometheus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.Prometheus)(nil), (*Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_Prometheus_To_v1alpha2_Prometheus(a.(*api.Prometheus), b.(*Prometheus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*SecretReference)(nil), (*api.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_SecretReference_To_api_SecretReference(a.(*SecretReference), b.(*api.SecretReference), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.SecretReference)(nil), (*SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_SecretReference_To_v1alpha2_SecretReference(a.(*api.SecretReference), b.(*SecretReference), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
@@ -99,6 +139,26 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
|
||||
out.SecretReference = (*api.SecretReference)(unsafe.Pointer(in.SecretReference))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_AuthToken_To_api_AuthToken is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
|
||||
out.SecretReference = (*SecretReference)(unsafe.Pointer(in.SecretReference))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_AuthToken_To_v1alpha2_AuthToken is an autogenerated conversion function.
|
||||
func Convert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
|
||||
return autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
if in.Profiles != nil {
|
||||
in, out := &in.Profiles, &out.Profiles
|
||||
@@ -116,9 +176,9 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
|
||||
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
|
||||
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
|
||||
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
|
||||
if err := Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MetricsCollector = (*api.MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
|
||||
out.MetricsProviders = *(*[]api.MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
|
||||
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -139,9 +199,9 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
|
||||
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
|
||||
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
|
||||
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
|
||||
if err := Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
|
||||
return err
|
||||
}
|
||||
out.MetricsCollector = (*MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
|
||||
out.MetricsProviders = *(*[]MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
|
||||
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -213,6 +273,28 @@ func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCo
|
||||
return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
|
||||
out.Source = api.MetricsSource(in.Source)
|
||||
out.Prometheus = (*api.Prometheus)(unsafe.Pointer(in.Prometheus))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
|
||||
out.Source = MetricsSource(in.Source)
|
||||
out.Prometheus = (*Prometheus)(unsafe.Pointer(in.Prometheus))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider is an autogenerated conversion function.
|
||||
func Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
|
||||
return autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
|
||||
out.Name = in.Name
|
||||
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
|
||||
@@ -309,3 +391,47 @@ func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins,
|
||||
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
|
||||
return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
|
||||
out.URL = in.URL
|
||||
out.AuthToken = (*api.AuthToken)(unsafe.Pointer(in.AuthToken))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_Prometheus_To_api_Prometheus is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
|
||||
out.URL = in.URL
|
||||
out.AuthToken = (*AuthToken)(unsafe.Pointer(in.AuthToken))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_Prometheus_To_v1alpha2_Prometheus is an autogenerated conversion function.
|
||||
func Convert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
|
||||
return autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
|
||||
out.Namespace = in.Namespace
|
||||
out.Name = in.Name
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_SecretReference_To_api_SecretReference is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
|
||||
out.Namespace = in.Namespace
|
||||
out.Name = in.Name
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_SecretReference_To_v1alpha2_SecretReference is an autogenerated conversion function.
|
||||
func Convert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
|
||||
return autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in, out, s)
|
||||
}
|
||||
|
||||
97
pkg/api/v1alpha2/zz_generated.deepcopy.go
generated
97
pkg/api/v1alpha2/zz_generated.deepcopy.go
generated
@@ -25,6 +25,27 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
|
||||
*out = *in
|
||||
if in.SecretReference != nil {
|
||||
in, out := &in.SecretReference, &out.SecretReference
|
||||
*out = new(SecretReference)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
|
||||
func (in *AuthToken) DeepCopy() *AuthToken {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuthToken)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
@@ -61,7 +82,23 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
out.MetricsCollector = in.MetricsCollector
|
||||
if in.MetricsCollector != nil {
|
||||
in, out := &in.MetricsCollector, &out.MetricsCollector
|
||||
*out = new(MetricsCollector)
|
||||
**out = **in
|
||||
}
|
||||
if in.MetricsProviders != nil {
|
||||
in, out := &in.MetricsProviders, &out.MetricsProviders
|
||||
*out = make([]MetricsProvider, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GracePeriodSeconds != nil {
|
||||
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -123,6 +160,27 @@ func (in *MetricsCollector) DeepCopy() *MetricsCollector {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
|
||||
*out = *in
|
||||
if in.Prometheus != nil {
|
||||
in, out := &in.Prometheus, &out.Prometheus
|
||||
*out = new(Prometheus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
|
||||
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsProvider)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
|
||||
*out = *in
|
||||
@@ -187,3 +245,40 @@ func (in *Plugins) DeepCopy() *Plugins {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
|
||||
*out = *in
|
||||
if in.AuthToken != nil {
|
||||
in, out := &in.AuthToken, &out.AuthToken
|
||||
*out = new(AuthToken)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
|
||||
func (in *Prometheus) DeepCopy() *Prometheus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Prometheus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
|
||||
func (in *SecretReference) DeepCopy() *SecretReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
118
pkg/api/zz_generated.deepcopy.go
generated
118
pkg/api/zz_generated.deepcopy.go
generated
@@ -25,6 +25,27 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
|
||||
*out = *in
|
||||
if in.SecretReference != nil {
|
||||
in, out := &in.SecretReference, &out.SecretReference
|
||||
*out = new(SecretReference)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
|
||||
func (in *AuthToken) DeepCopy() *AuthToken {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuthToken)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
@@ -61,7 +82,23 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
out.MetricsCollector = in.MetricsCollector
|
||||
if in.MetricsCollector != nil {
|
||||
in, out := &in.MetricsCollector, &out.MetricsCollector
|
||||
*out = new(MetricsCollector)
|
||||
**out = **in
|
||||
}
|
||||
if in.MetricsProviders != nil {
|
||||
in, out := &in.MetricsProviders, &out.MetricsProviders
|
||||
*out = make([]MetricsProvider, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.GracePeriodSeconds != nil {
|
||||
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -107,6 +144,27 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *EvictionLimits) DeepCopyInto(out *EvictionLimits) {
|
||||
*out = *in
|
||||
if in.Node != nil {
|
||||
in, out := &in.Node, &out.Node
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvictionLimits.
|
||||
func (in *EvictionLimits) DeepCopy() *EvictionLimits {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(EvictionLimits)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
|
||||
*out = *in
|
||||
@@ -123,6 +181,27 @@ func (in *MetricsCollector) DeepCopy() *MetricsCollector {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
|
||||
*out = *in
|
||||
if in.Prometheus != nil {
|
||||
in, out := &in.Prometheus, &out.Prometheus
|
||||
*out = new(Prometheus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
|
||||
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsProvider)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||
*out = *in
|
||||
@@ -237,6 +316,27 @@ func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
|
||||
*out = *in
|
||||
if in.AuthToken != nil {
|
||||
in, out := &in.AuthToken, &out.AuthToken
|
||||
*out = new(AuthToken)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
|
||||
func (in *Prometheus) DeepCopy() *Prometheus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Prometheus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
@@ -258,3 +358,19 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
|
||||
func (in *SecretReference) DeepCopy() *SecretReference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(SecretReference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -17,17 +17,30 @@ limitations under the License.
|
||||
package client
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
"github.com/prometheus/common/config"
|
||||
|
||||
// Ensure to load all auth plugins.
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/transport"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
var K8sPodCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
|
||||
func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
|
||||
var cfg *rest.Config
|
||||
if len(clientConnection.Kubeconfig) != 0 {
|
||||
@@ -94,3 +107,61 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
|
||||
}
|
||||
return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
|
||||
}
|
||||
|
||||
func loadCAFile(filepath string) (*x509.CertPool, error) {
|
||||
caCert, err := ioutil.ReadFile(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caCertPool := x509.NewCertPool()
|
||||
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
|
||||
return nil, fmt.Errorf("failed to append CA certificate to the pool")
|
||||
}
|
||||
|
||||
return caCertPool, nil
|
||||
}
|
||||
|
||||
func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *http.Transport, error) {
|
||||
// Retrieve Pod CA cert
|
||||
caCertPool, err := loadCAFile(K8sPodCAFilePath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
|
||||
}
|
||||
|
||||
// Get Prometheus Host
|
||||
u, err := url.Parse(prometheusURL)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
|
||||
}
|
||||
t := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
ServerName: u.Host,
|
||||
},
|
||||
}
|
||||
roundTripper := transport.NewBearerAuthRoundTripper(
|
||||
authToken,
|
||||
t,
|
||||
)
|
||||
|
||||
if authToken != "" {
|
||||
client, err := promapi.NewClient(promapi.Config{
|
||||
Address: prometheusURL,
|
||||
RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(authToken), roundTripper),
|
||||
})
|
||||
return client, t, err
|
||||
}
|
||||
client, err := promapi.NewClient(promapi.Config{
|
||||
Address: prometheusURL,
|
||||
})
|
||||
return client, t, err
|
||||
}
|
||||
|
||||
@@ -20,9 +20,12 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
@@ -30,18 +33,24 @@ import (
|
||||
policy "k8s.io/api/policy/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
corev1listers "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
@@ -62,6 +71,11 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
prometheusAuthTokenSecretKey = "prometheusAuthToken"
|
||||
workQueueKey = "key"
|
||||
)
|
||||
|
||||
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
|
||||
|
||||
type profileRunner struct {
|
||||
@@ -70,15 +84,21 @@ type profileRunner struct {
|
||||
}
|
||||
|
||||
type descheduler struct {
|
||||
rs *options.DeschedulerServer
|
||||
ir *informerResources
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
deschedulerPolicy *api.DeschedulerPolicy
|
||||
eventRecorder events.EventRecorder
|
||||
podEvictor *evictions.PodEvictor
|
||||
podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
rs *options.DeschedulerServer
|
||||
ir *informerResources
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
namespacedSecretsLister corev1listers.SecretNamespaceLister
|
||||
deschedulerPolicy *api.DeschedulerPolicy
|
||||
eventRecorder events.EventRecorder
|
||||
podEvictor *evictions.PodEvictor
|
||||
podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
prometheusClient promapi.Client
|
||||
previousPrometheusClientTransport *http.Transport
|
||||
queue workqueue.RateLimitingInterface
|
||||
currentPrometheusAuthToken string
|
||||
metricsProviders map[api.MetricsSource]*api.MetricsProvider
|
||||
}
|
||||
|
||||
type informerResources struct {
|
||||
@@ -125,8 +145,15 @@ func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFact
|
||||
return nil
|
||||
}
|
||||
|
||||
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory,
|
||||
) (*descheduler, error) {
|
||||
func metricsProviderListToMap(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider {
|
||||
providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
|
||||
for _, provider := range providersList {
|
||||
providersMap[provider.Source] = &provider
|
||||
}
|
||||
return providersMap
|
||||
}
|
||||
|
||||
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory, namespacedSharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
ir := newInformerResources(sharedInformerFactory)
|
||||
@@ -157,6 +184,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
|
||||
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
|
||||
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
|
||||
WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
|
||||
WithGracePeriodSeconds(deschedulerPolicy.GracePeriodSeconds).
|
||||
WithDryRun(rs.DryRun).
|
||||
WithMetricsEnabled(!rs.DisableMetrics),
|
||||
)
|
||||
@@ -164,20 +192,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var metricsCollector *metricscollector.MetricsCollector
|
||||
if deschedulerPolicy.MetricsCollector.Enabled {
|
||||
nodeSelector := labels.Everything()
|
||||
if deschedulerPolicy.NodeSelector != nil {
|
||||
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeSelector = sel
|
||||
}
|
||||
metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
|
||||
}
|
||||
|
||||
return &descheduler{
|
||||
desch := &descheduler{
|
||||
rs: rs,
|
||||
ir: ir,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
@@ -186,8 +201,148 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
|
||||
eventRecorder: eventRecorder,
|
||||
podEvictor: podEvictor,
|
||||
podEvictionReactionFnc: podEvictionReactionFnc,
|
||||
metricsCollector: metricsCollector,
|
||||
}, nil
|
||||
prometheusClient: rs.PrometheusClient,
|
||||
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}),
|
||||
metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
|
||||
}
|
||||
|
||||
if rs.MetricsClient != nil {
|
||||
nodeSelector := labels.Everything()
|
||||
if deschedulerPolicy.NodeSelector != nil {
|
||||
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeSelector = sel
|
||||
}
|
||||
desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
|
||||
}
|
||||
|
||||
prometheusProvider := desch.metricsProviders[api.PrometheusMetrics]
|
||||
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.AuthToken != nil {
|
||||
authTokenSecret := prometheusProvider.Prometheus.AuthToken.SecretReference
|
||||
if authTokenSecret == nil || authTokenSecret.Namespace == "" {
|
||||
return nil, fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
|
||||
}
|
||||
if namespacedSharedInformerFactory == nil {
|
||||
return nil, fmt.Errorf("namespacedSharedInformerFactory not configured")
|
||||
}
|
||||
namespacedSharedInformerFactory.Core().V1().Secrets().Informer().AddEventHandler(desch.eventHandler())
|
||||
desch.namespacedSecretsLister = namespacedSharedInformerFactory.Core().V1().Secrets().Lister().Secrets(authTokenSecret.Namespace)
|
||||
}
|
||||
|
||||
return desch, nil
|
||||
}
|
||||
|
||||
func (d *descheduler) reconcileInClusterSAToken() error {
|
||||
// Read the sa token and assume it has the sufficient permissions to authenticate
|
||||
cfg, err := rest.InClusterConfig()
|
||||
if err == nil {
|
||||
if d.currentPrometheusAuthToken != cfg.BearerToken {
|
||||
klog.V(2).Infof("Creating Prometheus client (with SA token)")
|
||||
prometheusClient, transport, err := client.CreatePrometheusClient(d.metricsProviders[api.PrometheusMetrics].Prometheus.URL, cfg.BearerToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create a prometheus client: %v", err)
|
||||
}
|
||||
d.prometheusClient = prometheusClient
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = transport
|
||||
d.currentPrometheusAuthToken = cfg.BearerToken
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err == rest.ErrNotInCluster {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unexpected error when reading in cluster config: %v", err)
|
||||
}
|
||||
|
||||
func (d *descheduler) runAuthenticationSecretReconciler(ctx context.Context) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer d.queue.ShutDown()
|
||||
|
||||
klog.Infof("Starting authentication secret reconciler")
|
||||
defer klog.Infof("Shutting down authentication secret reconciler")
|
||||
|
||||
go wait.UntilWithContext(ctx, d.runAuthenticationSecretReconcilerWorker, time.Second)
|
||||
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func (d *descheduler) runAuthenticationSecretReconcilerWorker(ctx context.Context) {
|
||||
for d.processNextWorkItem(ctx) {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *descheduler) processNextWorkItem(ctx context.Context) bool {
|
||||
dsKey, quit := d.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer d.queue.Done(dsKey)
|
||||
|
||||
err := d.sync()
|
||||
if err == nil {
|
||||
d.queue.Forget(dsKey)
|
||||
return true
|
||||
}
|
||||
|
||||
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
|
||||
d.queue.AddRateLimited(dsKey)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *descheduler) sync() error {
|
||||
prometheusConfig := d.metricsProviders[api.PrometheusMetrics].Prometheus
|
||||
if prometheusConfig == nil || prometheusConfig.AuthToken == nil || prometheusConfig.AuthToken.SecretReference == nil {
|
||||
return fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
|
||||
}
|
||||
ns := prometheusConfig.AuthToken.SecretReference.Namespace
|
||||
name := prometheusConfig.AuthToken.SecretReference.Name
|
||||
secretObj, err := d.namespacedSecretsLister.Get(name)
|
||||
if err != nil {
|
||||
// clear the token if the secret is not found
|
||||
if apierrors.IsNotFound(err) {
|
||||
d.currentPrometheusAuthToken = ""
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = nil
|
||||
d.prometheusClient = nil
|
||||
}
|
||||
return fmt.Errorf("unable to get %v/%v secret", ns, name)
|
||||
}
|
||||
authToken := string(secretObj.Data[prometheusAuthTokenSecretKey])
|
||||
if authToken == "" {
|
||||
return fmt.Errorf("prometheus authentication token secret missing %q data or empty", prometheusAuthTokenSecretKey)
|
||||
}
|
||||
if d.currentPrometheusAuthToken == authToken {
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(2).Infof("authentication secret token updated, recreating prometheus client")
|
||||
prometheusClient, transport, err := client.CreatePrometheusClient(prometheusConfig.URL, authToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create a prometheus client: %v", err)
|
||||
}
|
||||
d.prometheusClient = prometheusClient
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = transport
|
||||
d.currentPrometheusAuthToken = authToken
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *descheduler) eventHandler() cache.ResourceEventHandler {
|
||||
return cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
|
||||
UpdateFunc: func(old, new interface{}) { d.queue.Add(workQueueKey) },
|
||||
DeleteFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
|
||||
}
|
||||
}
|
||||
|
||||
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
|
||||
@@ -196,12 +351,13 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
|
||||
defer span.End()
|
||||
defer func(loopStartDuration time.Time) {
|
||||
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
metrics.LoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
}(time.Now())
|
||||
|
||||
// if len is still <= 1 error out
|
||||
if len(nodes) <= 1 {
|
||||
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
|
||||
return fmt.Errorf("the cluster size is 0 or 1")
|
||||
klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
|
||||
return nil // gracefully skip this cycle instead of aborting
|
||||
}
|
||||
|
||||
var client clientset.Interface
|
||||
@@ -260,6 +416,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
|
||||
var profileRunners []profileRunner
|
||||
for _, profile := range d.deschedulerPolicy.Profiles {
|
||||
currProfile, err := frameworkprofile.NewProfile(
|
||||
ctx,
|
||||
profile,
|
||||
pluginregistry.PluginRegistry,
|
||||
frameworkprofile.WithClientSet(client),
|
||||
@@ -267,6 +424,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
|
||||
frameworkprofile.WithPodEvictor(d.podEvictor),
|
||||
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
|
||||
frameworkprofile.WithMetricsCollector(d.metricsCollector),
|
||||
frameworkprofile.WithPrometheusClient(d.prometheusClient),
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
|
||||
@@ -331,7 +489,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if deschedulerPolicy.MetricsCollector.Enabled {
|
||||
if (deschedulerPolicy.MetricsCollector != nil && deschedulerPolicy.MetricsCollector.Enabled) || metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.KubernetesMetrics] != nil {
|
||||
metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -414,6 +572,14 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
|
||||
}
|
||||
}
|
||||
|
||||
type tokenReconciliation int
|
||||
|
||||
const (
|
||||
noReconciliation tokenReconciliation = iota
|
||||
inClusterReconciliation
|
||||
secretReconciliation
|
||||
)
|
||||
|
||||
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
|
||||
@@ -435,7 +601,22 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
|
||||
defer eventBroadcaster.Shutdown()
|
||||
|
||||
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
|
||||
var namespacedSharedInformerFactory informers.SharedInformerFactory
|
||||
metricProviderTokenReconciliation := noReconciliation
|
||||
|
||||
prometheusProvider := metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.PrometheusMetrics]
|
||||
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.URL != "" {
|
||||
if prometheusProvider.Prometheus.AuthToken != nil {
|
||||
// Will get reconciled
|
||||
namespacedSharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields), informers.WithNamespace(prometheusProvider.Prometheus.AuthToken.SecretReference.Namespace))
|
||||
metricProviderTokenReconciliation = secretReconciliation
|
||||
} else {
|
||||
// Use the sa token and assume it has the sufficient permissions to authenticate
|
||||
metricProviderTokenReconciliation = inClusterReconciliation
|
||||
}
|
||||
}
|
||||
|
||||
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory, namespacedSharedInformerFactory)
|
||||
if err != nil {
|
||||
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
return err
|
||||
@@ -444,10 +625,17 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
defer cancel()
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
namespacedSharedInformerFactory.Start(ctx.Done())
|
||||
}
|
||||
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
descheduler.podEvictor.WaitForEventHandlersSync(ctx)
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
namespacedSharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
}
|
||||
|
||||
if deschedulerPolicy.MetricsCollector.Enabled {
|
||||
if descheduler.metricsCollector != nil {
|
||||
go func() {
|
||||
klog.V(2).Infof("Starting metrics collector")
|
||||
descheduler.metricsCollector.Run(ctx)
|
||||
@@ -461,7 +649,19 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
}
|
||||
}
|
||||
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
go descheduler.runAuthenticationSecretReconciler(ctx)
|
||||
}
|
||||
|
||||
wait.NonSlidingUntil(func() {
|
||||
if metricProviderTokenReconciliation == inClusterReconciliation {
|
||||
// Read the sa token and assume it has the sufficient permissions to authenticate
|
||||
if err := descheduler.reconcileInClusterSAToken(); err != nil {
|
||||
klog.ErrorS(err, "unable to reconcile an in cluster SA token")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// A next context is created here intentionally to avoid nesting the spans via context.
|
||||
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
|
||||
defer sSpan.End()
|
||||
|
||||
@@ -136,6 +136,10 @@ func removeDuplicatesPolicy() *api.DeschedulerPolicy {
|
||||
}
|
||||
|
||||
func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
|
||||
var metricsSource api.MetricsSource = ""
|
||||
if metricsEnabled {
|
||||
metricsSource = api.KubernetesMetrics
|
||||
}
|
||||
return &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
@@ -146,8 +150,8 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
|
||||
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: thresholds,
|
||||
TargetThresholds: targetThresholds,
|
||||
MetricsUtilization: nodeutilization.MetricsUtilization{
|
||||
MetricsServer: metricsEnabled,
|
||||
MetricsUtilization: &nodeutilization.MetricsUtilization{
|
||||
Source: metricsSource,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -189,7 +193,7 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
|
||||
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
|
||||
|
||||
descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
|
||||
descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory, nil)
|
||||
if err != nil {
|
||||
eventBroadcaster.Shutdown()
|
||||
t.Fatalf("Unable to create a descheduler instance: %v", err)
|
||||
@@ -837,7 +841,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
|
||||
},
|
||||
true, // enabled metrics utilization
|
||||
)
|
||||
policy.MetricsCollector.Enabled = true
|
||||
policy.MetricsProviders = []api.MetricsProvider{{Source: api.KubernetesMetrics}}
|
||||
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
_, descheduler, _ := initDescheduler(
|
||||
|
||||
@@ -42,6 +42,12 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
const (
|
||||
deschedulerGlobalName = "sigs.k8s.io/descheduler"
|
||||
reasonAnnotationKey = "reason"
|
||||
requestedByAnnotationKey = "requested-by"
|
||||
)
|
||||
|
||||
var (
|
||||
assumedEvictionRequestTimeoutSeconds uint = 10 * 60 // 10 minutes
|
||||
evictionRequestsCacheResyncPeriod time.Duration = 10 * time.Minute
|
||||
@@ -214,6 +220,7 @@ type PodEvictor struct {
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
gracePeriodSeconds *int64
|
||||
nodePodCount nodePodEvictedCount
|
||||
namespacePodCount namespacePodEvictCount
|
||||
totalPodCount uint
|
||||
@@ -247,6 +254,7 @@ func NewPodEvictor(
|
||||
maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
|
||||
maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
|
||||
maxPodsToEvictTotal: options.maxPodsToEvictTotal,
|
||||
gracePeriodSeconds: options.gracePeriodSeconds,
|
||||
metricsEnabled: options.metricsEnabled,
|
||||
nodePodCount: make(nodePodEvictedCount),
|
||||
namespacePodCount: make(namespacePodEvictCount),
|
||||
@@ -480,6 +488,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
err := NewEvictionTotalLimitError()
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
|
||||
@@ -494,6 +503,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
@@ -508,6 +518,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
err := NewEvictionNamespaceLimitError(pod.Namespace)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
|
||||
@@ -517,13 +528,14 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
return err
|
||||
}
|
||||
|
||||
ignore, err := pe.evictPod(ctx, pod)
|
||||
ignore, err := pe.evictPod(ctx, pod, opts)
|
||||
if err != nil {
|
||||
// err is used only for logging purposes
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
|
||||
@@ -543,6 +555,7 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvictedTotal.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
|
||||
if pe.dryRun {
|
||||
@@ -562,8 +575,10 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
|
||||
}
|
||||
|
||||
// return (ignore, err)
|
||||
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
|
||||
deleteOptions := &metav1.DeleteOptions{}
|
||||
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) (bool, error) {
|
||||
deleteOptions := &metav1.DeleteOptions{
|
||||
GracePeriodSeconds: pe.gracePeriodSeconds,
|
||||
}
|
||||
// GracePeriodSeconds ?
|
||||
eviction := &policy.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
@@ -573,6 +588,10 @@ func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: pod.Namespace,
|
||||
Annotations: map[string]string{
|
||||
"reason": fmt.Sprintf("triggered by %v/%v: %v", opts.ProfileName, opts.StrategyName, opts.Reason),
|
||||
"requested-by": deschedulerGlobalName,
|
||||
},
|
||||
},
|
||||
DeleteOptions: deleteOptions,
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -114,7 +115,7 @@ func TestEvictPod(t *testing.T) {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
_, got := podEvictor.evictPod(ctx, test.evictedPod)
|
||||
_, got := podEvictor.evictPod(ctx, test.evictedPod, EvictOptions{})
|
||||
if got != test.wantErr {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
|
||||
}
|
||||
@@ -418,7 +419,11 @@ func TestEvictionRequestsCacheCleanup(t *testing.T) {
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
podName := eviction.GetName()
|
||||
if podName == "p1" || podName == "p2" {
|
||||
annotations := eviction.GetAnnotations()
|
||||
if (podName == "p1" || podName == "p2") && annotations[requestedByAnnotationKey] == deschedulerGlobalName && strings.HasPrefix(
|
||||
annotations[reasonAnnotationKey],
|
||||
"triggered by",
|
||||
) {
|
||||
return true, nil, &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Reason: metav1.StatusReasonTooManyRequests,
|
||||
|
||||
@@ -12,6 +12,7 @@ type Options struct {
|
||||
maxPodsToEvictTotal *uint
|
||||
evictionFailureEventNotification bool
|
||||
metricsEnabled bool
|
||||
gracePeriodSeconds *int64
|
||||
}
|
||||
|
||||
// NewOptions returns an Options with default values.
|
||||
@@ -46,6 +47,11 @@ func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithGracePeriodSeconds(gracePeriodSeconds *int64) *Options {
|
||||
o.gracePeriodSeconds = gracePeriodSeconds
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
|
||||
o.metricsEnabled = metricsEnabled
|
||||
return o
|
||||
|
||||
@@ -17,12 +17,17 @@ limitations under the License.
|
||||
package utils
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
EvictionKind = "Eviction"
|
||||
EvictionSubresource = "pods/eviction"
|
||||
// A new experimental feature for soft no-eviction preference.
|
||||
// Each plugin will decide whether the soft preference will be respected.
|
||||
// If configured the soft preference turns into a mandatory no-eviction policy for the DefaultEvictor plugin.
|
||||
SoftNoEvictionAnnotationKey = "descheduler.alpha.kubernetes.io/prefer-no-eviction"
|
||||
)
|
||||
|
||||
// SupportEviction uses Discovery API to find out if the server support eviction subresource
|
||||
@@ -56,3 +61,9 @@ func SupportEviction(client clientset.Interface) (string, error) {
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// HaveNoEvictionAnnotation checks if the pod have soft no-eviction annotation
|
||||
func HaveNoEvictionAnnotation(pod *corev1.Pod) bool {
|
||||
_, found := pod.ObjectMeta.Annotations[SoftNoEvictionAnnotationKey]
|
||||
return found
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@ import (
|
||||
"k8s.io/klog/v2"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -43,7 +44,7 @@ type MetricsCollector struct {
|
||||
metricsClientset metricsclient.Interface
|
||||
nodeSelector labels.Selector
|
||||
|
||||
nodes map[string]map[v1.ResourceName]*resource.Quantity
|
||||
nodes map[string]api.ReferencedResourceList
|
||||
|
||||
mu sync.RWMutex
|
||||
// hasSynced signals at least one sync succeeded
|
||||
@@ -55,7 +56,7 @@ func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset me
|
||||
nodeLister: nodeLister,
|
||||
metricsClientset: metricsClientset,
|
||||
nodeSelector: nodeSelector,
|
||||
nodes: make(map[string]map[v1.ResourceName]*resource.Quantity),
|
||||
nodes: make(map[string]api.ReferencedResourceList),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,13 +78,13 @@ func weightedAverage(prevValue, value int64) int64 {
|
||||
return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
allNodesUsage := make(map[string]api.ReferencedResourceList)
|
||||
for nodeName := range mc.nodes {
|
||||
allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
|
||||
allNodesUsage[nodeName] = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
|
||||
}
|
||||
@@ -92,7 +93,7 @@ func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*res
|
||||
return allNodesUsage, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
@@ -100,7 +101,7 @@ func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resou
|
||||
klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
|
||||
return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
return map[v1.ResourceName]*resource.Quantity{
|
||||
return api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
|
||||
}, nil
|
||||
@@ -131,7 +132,7 @@ func (mc *MetricsCollector) Collect(ctx context.Context) error {
|
||||
}
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
|
||||
mc.nodes[node.Name] = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
|
||||
}
|
||||
|
||||
@@ -29,10 +29,11 @@ import (
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
|
||||
func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
|
||||
t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
|
||||
if usage[v1.ResourceCPU].MilliValue() != millicpu {
|
||||
t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
@@ -104,20 +105,29 @@ func IsReady(node *v1.Node) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeFit returns true if the provided pod can be scheduled onto the provided node.
|
||||
// NodeFit returns nil if the provided pod can be scheduled onto the provided node.
|
||||
// Otherwise, it returns an error explaining why the node does not fit the pod.
|
||||
//
|
||||
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
|
||||
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
|
||||
// deciding if a pod would fit on a node, but more predicates may be added in the future.
|
||||
// There should be no methods to modify nodes or pods in this method.
|
||||
// It considers a subset of the Kubernetes Scheduler's predicates
|
||||
// when deciding if a pod would fit on a node. More predicates may be added in the future.
|
||||
//
|
||||
// The checks are ordered from fastest to slowest to reduce unnecessary computation,
|
||||
// especially for nodes that are clearly unsuitable early in the evaluation process.
|
||||
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
|
||||
// Check node selector and required affinity
|
||||
// Check if the node is marked as unschedulable.
|
||||
if IsNodeUnschedulable(node) {
|
||||
return errors.New("node is not schedulable")
|
||||
}
|
||||
|
||||
// Check if the pod matches the node's label selector (nodeSelector) and required node affinity rules.
|
||||
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
|
||||
return err
|
||||
} else if !ok {
|
||||
return errors.New("pod node selector does not match the node label")
|
||||
}
|
||||
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
// Check taints on the node that have effect NoSchedule or NoExecute.
|
||||
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
@@ -125,25 +135,21 @@ func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v
|
||||
return errors.New("pod does not tolerate taints on the node")
|
||||
}
|
||||
|
||||
// Check if the pod can fit on a node based off it's requests
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
return reqError
|
||||
}
|
||||
}
|
||||
|
||||
// Check if node is schedulable
|
||||
if IsNodeUnschedulable(node) {
|
||||
return errors.New("node is not schedulable")
|
||||
}
|
||||
|
||||
// Check if pod matches inter-pod anti-affinity rule of pod on node
|
||||
// Check if the pod violates any inter-pod anti-affinity rules with existing pods on the node.
|
||||
// This involves iterating over all pods assigned to the node and evaluating label selectors.
|
||||
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
|
||||
}
|
||||
|
||||
// Check whether the node has enough available resources to accommodate the pod.
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
return reqError
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -213,7 +219,7 @@ func IsNodeUnschedulable(node *v1.Node) bool {
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
// Get pod requests
|
||||
podRequests, _ := utils.PodRequestsAndLimits(pod)
|
||||
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
|
||||
resourceNames := []v1.ResourceName{v1.ResourcePods}
|
||||
for name := range podRequests {
|
||||
resourceNames = append(resourceNames, name)
|
||||
}
|
||||
@@ -236,7 +242,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
}
|
||||
}
|
||||
// check pod num, at least one pod number is avaibalbe
|
||||
if availableResources[v1.ResourcePods].MilliValue() <= 0 {
|
||||
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
|
||||
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
|
||||
}
|
||||
|
||||
@@ -244,7 +250,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
|
||||
}
|
||||
|
||||
// nodeAvailableResources returns resources mapped to the quanitity available on the node.
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -253,13 +259,18 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remainingResources := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
|
||||
}
|
||||
remainingResources := api.ReferencedResourceList{}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
if IsBasicResource(name) {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
remainingResources[name] = resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI)
|
||||
case v1.ResourceMemory:
|
||||
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI)
|
||||
case v1.ResourcePods:
|
||||
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI)
|
||||
}
|
||||
} else {
|
||||
if _, exists := node.Status.Allocatable[name]; exists {
|
||||
allocatableResource := node.Status.Allocatable[name]
|
||||
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
|
||||
@@ -273,14 +284,17 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
|
||||
}
|
||||
|
||||
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
totalUtilization := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
|
||||
}
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
|
||||
totalUtilization := api.ReferencedResourceList{}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
totalUtilization[name] = resource.NewMilliQuantity(0, resource.DecimalSI)
|
||||
case v1.ResourceMemory:
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.BinarySI)
|
||||
case v1.ResourcePods:
|
||||
totalUtilization[name] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
default:
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -254,14 +255,32 @@ func SortPodsBasedOnPriorityLowToHigh(pods []*v1.Pod) {
|
||||
return false
|
||||
}
|
||||
if (pods[j].Spec.Priority == nil && pods[i].Spec.Priority == nil) || (*pods[i].Spec.Priority == *pods[j].Spec.Priority) {
|
||||
if IsBestEffortPod(pods[i]) {
|
||||
iIsBestEffortPod := IsBestEffortPod(pods[i])
|
||||
jIsBestEffortPod := IsBestEffortPod(pods[j])
|
||||
iIsBurstablePod := IsBurstablePod(pods[i])
|
||||
jIsBurstablePod := IsBurstablePod(pods[j])
|
||||
iIsGuaranteedPod := IsGuaranteedPod(pods[i])
|
||||
jIsGuaranteedPod := IsGuaranteedPod(pods[j])
|
||||
if (iIsBestEffortPod && jIsBestEffortPod) || (iIsBurstablePod && jIsBurstablePod) || (iIsGuaranteedPod && jIsGuaranteedPod) {
|
||||
iHasNoEvictonPolicy := evictionutils.HaveNoEvictionAnnotation(pods[i])
|
||||
jHasNoEvictonPolicy := evictionutils.HaveNoEvictionAnnotation(pods[j])
|
||||
if !iHasNoEvictonPolicy {
|
||||
return true
|
||||
}
|
||||
if !jHasNoEvictonPolicy {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
if IsBurstablePod(pods[i]) && IsGuaranteedPod(pods[j]) {
|
||||
if iIsBestEffortPod {
|
||||
return true
|
||||
}
|
||||
if iIsBurstablePod && jIsGuaranteedPod {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return *pods[i].Spec.Priority < *pods[j].Spec.Priority
|
||||
})
|
||||
}
|
||||
|
||||
@@ -117,6 +117,14 @@ func TestListPodsOnANode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func getPodListNames(pods []*v1.Pod) []string {
|
||||
names := []string{}
|
||||
for _, pod := range pods {
|
||||
names = append(names, pod.Name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
n1 := test.BuildTestNode("n1", 4000, 3000, 9, nil)
|
||||
|
||||
@@ -149,11 +157,70 @@ func TestSortPodsBasedOnPriorityLowToHigh(t *testing.T) {
|
||||
p6 := test.BuildTestPod("p6", 400, 100, n1.Name, test.MakeGuaranteedPod)
|
||||
p6.Spec.Priority = nil
|
||||
|
||||
podList := []*v1.Pod{p4, p3, p2, p1, p6, p5}
|
||||
p7 := test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, lowPriority)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// BestEffort
|
||||
p8 := test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBestEffortPod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p9 := test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeBurstablePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Guaranteed
|
||||
p10 := test.BuildTestPod("p10", 400, 100, n1.Name, func(pod *v1.Pod) {
|
||||
test.SetPodPriority(pod, highPriority)
|
||||
test.MakeGuaranteedPod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p11 := test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.MakeBurstablePod(pod)
|
||||
})
|
||||
|
||||
// Burstable
|
||||
p12 := test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
test.MakeBurstablePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/prefer-no-eviction": "",
|
||||
}
|
||||
})
|
||||
|
||||
podList := []*v1.Pod{p1, p8, p9, p10, p2, p3, p4, p5, p6, p7, p11, p12}
|
||||
// p5: no priority, best effort
|
||||
// p11: no priority, burstable
|
||||
// p6: no priority, guaranteed
|
||||
// p1: low priority
|
||||
// p7: low priority, prefer-no-eviction
|
||||
// p2: high priority, best effort
|
||||
// p8: high priority, best effort, prefer-no-eviction
|
||||
// p3: high priority, burstable
|
||||
// p9: high priority, burstable, prefer-no-eviction
|
||||
// p4: high priority, guaranteed
|
||||
// p10: high priority, guaranteed, prefer-no-eviction
|
||||
expectedPodList := []*v1.Pod{p5, p11, p12, p6, p1, p7, p2, p8, p3, p9, p4, p10}
|
||||
|
||||
SortPodsBasedOnPriorityLowToHigh(podList)
|
||||
if !reflect.DeepEqual(podList[len(podList)-1], p4) {
|
||||
t.Errorf("Expected last pod in sorted list to be %v which of highest priority and guaranteed but got %v", p4, podList[len(podList)-1])
|
||||
if !reflect.DeepEqual(getPodListNames(podList), getPodListNames(expectedPodList)) {
|
||||
t.Errorf("Pods were sorted in an unexpected order: %v, expected %v", getPodListNames(podList), getPodListNames(expectedPodList))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@ package descheduler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
@@ -62,21 +63,22 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setDefaults(*internalPolicy, registry, client)
|
||||
|
||||
return internalPolicy, nil
|
||||
return setDefaults(*internalPolicy, registry, client)
|
||||
}
|
||||
|
||||
func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) *api.DeschedulerPolicy {
|
||||
func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) (*api.DeschedulerPolicy, error) {
|
||||
var err error
|
||||
for idx, profile := range in.Profiles {
|
||||
// If we need to set defaults coming from loadtime in each profile we do it here
|
||||
in.Profiles[idx] = setDefaultEvictor(profile, client)
|
||||
in.Profiles[idx], err = setDefaultEvictor(profile, client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, pluginConfig := range profile.PluginConfigs {
|
||||
setDefaultsPluginConfig(&pluginConfig, registry)
|
||||
}
|
||||
}
|
||||
return &in
|
||||
return &in, nil
|
||||
}
|
||||
|
||||
func setDefaultsPluginConfig(pluginConfig *api.PluginConfig, registry pluginregistry.Registry) {
|
||||
@@ -97,7 +99,7 @@ func findPluginName(names []string, key string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) api.DeschedulerProfile {
|
||||
func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) (api.DeschedulerProfile, error) {
|
||||
newPluginConfig := api.PluginConfig{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
@@ -106,6 +108,10 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
|
||||
IgnorePvcPods: false,
|
||||
EvictFailedBarePods: false,
|
||||
IgnorePodsWithoutPDB: false,
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
DefaultDisabled: []defaultevictor.PodProtection{},
|
||||
ExtraEnabled: []defaultevictor.PodProtection{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -128,18 +134,19 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), client, defaultevictorPluginConfig.Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold)
|
||||
if err != nil {
|
||||
klog.Error(err, "Failed to get threshold priority from args")
|
||||
return profile, err
|
||||
}
|
||||
profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold = &api.PriorityThreshold{}
|
||||
profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold.Value = &thresholdPriority
|
||||
return profile
|
||||
return profile, nil
|
||||
}
|
||||
|
||||
func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
|
||||
var errorsInProfiles []error
|
||||
var errorsInPolicy []error
|
||||
for _, profile := range in.Profiles {
|
||||
for _, pluginConfig := range profile.PluginConfigs {
|
||||
if _, ok := registry[pluginConfig.Name]; !ok {
|
||||
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -148,9 +155,41 @@ func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginr
|
||||
continue
|
||||
}
|
||||
if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
|
||||
errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
|
||||
}
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(errorsInProfiles)
|
||||
providers := map[api.MetricsSource]api.MetricsProvider{}
|
||||
for _, provider := range in.MetricsProviders {
|
||||
if _, ok := providers[provider.Source]; ok {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("metric provider %q is already configured, each source can be configured only once", provider.Source))
|
||||
} else {
|
||||
providers[provider.Source] = provider
|
||||
}
|
||||
}
|
||||
if _, exists := providers[api.KubernetesMetrics]; exists && in.MetricsCollector != nil && in.MetricsCollector.Enabled {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"))
|
||||
}
|
||||
if prometheusConfig, exists := providers[api.PrometheusMetrics]; exists {
|
||||
if prometheusConfig.Prometheus == nil {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus configuration is required when prometheus source is enabled"))
|
||||
} else {
|
||||
if prometheusConfig.Prometheus.URL == "" {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL is required when prometheus is enabled"))
|
||||
} else if _, err := url.Parse(prometheusConfig.Prometheus.URL); err != nil {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
|
||||
}
|
||||
|
||||
if prometheusConfig.Prometheus.AuthToken != nil {
|
||||
secretRef := prometheusConfig.Prometheus.AuthToken.SecretReference
|
||||
if secretRef == nil {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"))
|
||||
} else if secretRef.Name == "" || secretRef.Namespace == "" {
|
||||
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(errorsInPolicy)
|
||||
}
|
||||
|
||||
@@ -17,10 +17,12 @@ limitations under the License.
|
||||
package descheduler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
@@ -121,6 +123,25 @@ profiles:
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "v1alpha2 to internal, validate error handling (priorityThreshold exceeding maximum)",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
priorityThreshold:
|
||||
value: 2000000001
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemovePodsHavingTooManyRestarts"
|
||||
`),
|
||||
result: nil,
|
||||
err: errors.New("priority threshold can't be greater than 2000000000"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -191,12 +212,153 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
|
||||
},
|
||||
result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
|
||||
},
|
||||
{
|
||||
description: "Duplicit metrics providers error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{Source: api.KubernetesMetrics},
|
||||
{Source: api.KubernetesMetrics},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("metric provider \"KubernetesMetrics\" is already configured, each source can be configured only once"),
|
||||
},
|
||||
{
|
||||
description: "Too many metrics providers error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsCollector: &api.MetricsCollector{
|
||||
Enabled: true,
|
||||
},
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{Source: api.KubernetesMetrics},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"),
|
||||
},
|
||||
{
|
||||
description: "missing prometheus url error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus URL is required when prometheus is enabled"),
|
||||
},
|
||||
{
|
||||
description: "prometheus url is not valid error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "http://example.com:-80",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("error parsing prometheus URL: parse \"http://example.com:-80\": invalid port \":-80\" after host"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken with no secret reference error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken with empty secret reference error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken missing secret reference namespace error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{
|
||||
Name: "secretname",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
|
||||
},
|
||||
{
|
||||
description: "prometheus authtoken missing secret reference name error",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{
|
||||
Namespace: "secretnamespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
|
||||
},
|
||||
{
|
||||
description: "valid prometheus authtoken secret reference",
|
||||
deschedulerPolicy: api.DeschedulerPolicy{
|
||||
MetricsProviders: []api.MetricsProvider{
|
||||
{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &api.Prometheus{
|
||||
URL: "https://example.com:80",
|
||||
AuthToken: &api.AuthToken{
|
||||
SecretReference: &api.SecretReference{
|
||||
Name: "secretname",
|
||||
Namespace: "secretnamespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
result := validateDeschedulerConfiguration(tc.deschedulerPolicy, pluginregistry.PluginRegistry)
|
||||
if result.Error() != tc.result.Error() {
|
||||
if result == nil && tc.result != nil || result != nil && tc.result == nil {
|
||||
t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
|
||||
} else if result == nil && tc.result == nil {
|
||||
return
|
||||
} else if result.Error() != tc.result.Error() {
|
||||
t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
|
||||
}
|
||||
})
|
||||
@@ -335,6 +497,313 @@ profiles:
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test DisabledDefaultPodProtections configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
defaultevictor.DaemonSetPods,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test podProtections extraEnabled configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
defaultevictor.PodsWithoutPDB,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test both ExtraPodProtections and DisabledDefaultPodProtections configuration",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "ProfileName",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{
|
||||
PodProtections: defaultevictor.PodProtections{
|
||||
ExtraEnabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithPVC,
|
||||
defaultevictor.PodsWithoutPDB,
|
||||
},
|
||||
DefaultDisabled: []defaultevictor.PodProtection{
|
||||
defaultevictor.PodsWithLocalStorage,
|
||||
defaultevictor.DaemonSetPods,
|
||||
},
|
||||
},
|
||||
PriorityThreshold: &api.PriorityThreshold{Value: utilptr.To[int32](2000000000)},
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
PreEvictionFilter: api.PluginSet{
|
||||
Enabled: []string{defaultevictor.PluginName},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "test error when using both Deprecated fields and DisabledDefaultPodProtections/ExtraPodProtections",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
evictSystemCriticalPods: true
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "PodsWithPVC"
|
||||
- "PodsWithoutPDB"
|
||||
defaultDisabled:
|
||||
- "PodsWithLocalStorage"
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
description: "test error when Disables a default protection that does not exist",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "InvalidProtection"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"InvalidProtection\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Enables an extra protection that does not exist",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "InvalidProtection"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"InvalidProtection\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Disables an extra protection",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
defaultDisabled:
|
||||
- "PodsWithPVC"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in DefaultDisabled: \"PodsWithPVC\". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]"),
|
||||
},
|
||||
{
|
||||
description: "test error when Enables a default protection",
|
||||
policy: []byte(`apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
args:
|
||||
podProtections:
|
||||
extraEnabled:
|
||||
- "DaemonSetPods"
|
||||
priorityThreshold:
|
||||
value: 2000000000
|
||||
nodeFit: true
|
||||
plugins:
|
||||
filter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
preEvictionFilter:
|
||||
enabled:
|
||||
- "DefaultEvictor"
|
||||
`),
|
||||
result: nil,
|
||||
err: fmt.Errorf("in profile ProfileName: invalid pod protection policy in ExtraEnabled: \"DaemonSetPods\". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -342,14 +811,14 @@ profiles:
|
||||
result, err := decode("filename", tc.policy, client, pluginregistry.PluginRegistry)
|
||||
if err != nil {
|
||||
if tc.err == nil {
|
||||
t.Errorf("unexpected error: %s.", err.Error())
|
||||
} else {
|
||||
t.Errorf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
t.Fatalf("unexpected error: %s.", err.Error())
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Fatalf("unexpected error: %s. Was expecting %s", err.Error(), tc.err.Error())
|
||||
}
|
||||
}
|
||||
diff := cmp.Diff(tc.result, result)
|
||||
if diff != "" && err == nil {
|
||||
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
if diff != "" {
|
||||
t.Fatalf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -11,6 +11,8 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
)
|
||||
|
||||
type HandleImpl struct {
|
||||
@@ -20,6 +22,7 @@ type HandleImpl struct {
|
||||
EvictorFilterImpl frameworktypes.EvictorPlugin
|
||||
PodEvictorImpl *evictions.PodEvictor
|
||||
MetricsCollectorImpl *metricscollector.MetricsCollector
|
||||
PrometheusClientImpl promapi.Client
|
||||
}
|
||||
|
||||
var _ frameworktypes.Handle = &HandleImpl{}
|
||||
@@ -28,6 +31,10 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
|
||||
return hi.ClientsetImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) PrometheusClient() promapi.Client {
|
||||
return hi.PrometheusClientImpl
|
||||
}
|
||||
|
||||
func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
|
||||
return hi.MetricsCollectorImpl
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ type FakePlugin struct {
|
||||
}
|
||||
|
||||
func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -74,7 +74,7 @@ func NewPluginFncFromFake(fp *FakePlugin) pluginregistry.PluginBuilder {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakePluginArgs, got %T", args)
|
||||
@@ -165,7 +165,7 @@ type FakeDeschedulePlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeDeschedulePluginFncFromFake(fp *FakeDeschedulePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeDeschedulePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeDeschedulePluginArgs, got %T", args)
|
||||
@@ -252,7 +252,7 @@ type FakeBalancePlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeBalancePluginFncFromFake(fp *FakeBalancePlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeBalancePluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeBalancePluginArgs, got %T", args)
|
||||
@@ -339,7 +339,7 @@ type FakeFilterPlugin struct {
|
||||
}
|
||||
|
||||
func NewFakeFilterPluginFncFromFake(fp *FakeFilterPlugin) pluginregistry.PluginBuilder {
|
||||
return func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
return func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
fakePluginArgs, ok := args.(*FakeFilterPluginArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type FakeFilterPluginArgs, got %T", args)
|
||||
|
||||
@@ -17,6 +17,8 @@ limitations under the License.
|
||||
package pluginregistry
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -35,7 +37,7 @@ type PluginUtilities struct {
|
||||
PluginArgDefaulter PluginArgDefaulter
|
||||
}
|
||||
|
||||
type PluginBuilder = func(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
|
||||
type PluginBuilder = func(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error)
|
||||
|
||||
type (
|
||||
PluginArgValidator = func(args runtime.Object) error
|
||||
|
||||
94
pkg/framework/plugins/defaultevictor/constraints.go
Normal file
94
pkg/framework/plugins/defaultevictor/constraints.go
Normal file
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
func evictionConstraintsForLabelSelector(logger klog.Logger, labelSelector *metav1.LabelSelector) ([]constraint, error) {
|
||||
if labelSelector != nil {
|
||||
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
|
||||
if err != nil {
|
||||
logger.Error(err, "could not get selector from label selector")
|
||||
return nil, err
|
||||
}
|
||||
if !selector.Empty() {
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evictionConstraintsForMinReplicas(logger klog.Logger, minReplicas uint, handle frameworktypes.Handle) ([]constraint, error) {
|
||||
if minReplicas > 1 {
|
||||
indexName := "metadata.ownerReferences"
|
||||
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
|
||||
if err != nil {
|
||||
logger.Error(err, "could not get pod indexer by ownerRefs")
|
||||
return nil, err
|
||||
}
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(pod.OwnerReferences) > 1 {
|
||||
logger.V(5).Info("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
|
||||
}
|
||||
if uint(len(objs)) < minReplicas {
|
||||
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), minReplicas)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func evictionConstraintsForMinPodAge(minPodAge *metav1.Duration) []constraint {
|
||||
if minPodAge != nil {
|
||||
return []constraint{
|
||||
func(pod *v1.Pod) error {
|
||||
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < minPodAge.Duration {
|
||||
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", minPodAge.String())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -14,19 +14,18 @@ limitations under the License.
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
// "context"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
"slices"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -48,6 +47,7 @@ type constraint func(pod *v1.Pod) error
|
||||
// This plugin is only meant to customize other actions (extension points) of the evictor,
|
||||
// like filtering, sorting, and other ones that might be relevant in the future
|
||||
type DefaultEvictor struct {
|
||||
logger klog.Logger
|
||||
args *DefaultEvictorArgs
|
||||
constraints []constraint
|
||||
handle frameworktypes.Handle
|
||||
@@ -66,150 +66,238 @@ func HaveEvictAnnotation(pod *v1.Pod) bool {
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
// nolint: gocyclo
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
defaultEvictorArgs, ok := args.(*DefaultEvictorArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
ev := &DefaultEvictor{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: defaultEvictorArgs,
|
||||
}
|
||||
// add constraints
|
||||
err := ev.addAllConstraints(logger, handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.EvictFailedBarePods {
|
||||
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
func (d *DefaultEvictor) addAllConstraints(logger klog.Logger, handle frameworktypes.Handle) error {
|
||||
args := d.args
|
||||
// Determine effective protected policies based on the provided arguments.
|
||||
effectivePodProtections := getEffectivePodProtections(args)
|
||||
|
||||
if err := applyEffectivePodProtections(d, effectivePodProtections, handle); err != nil {
|
||||
return fmt.Errorf("failed to apply effective protected policies: %w", err)
|
||||
}
|
||||
if constraints, err := evictionConstraintsForLabelSelector(logger, args.LabelSelector); err != nil {
|
||||
return err
|
||||
} else {
|
||||
d.constraints = append(d.constraints, constraints...)
|
||||
}
|
||||
if constraints, err := evictionConstraintsForMinReplicas(logger, args.MinReplicas, handle); err != nil {
|
||||
return err
|
||||
} else {
|
||||
d.constraints = append(d.constraints, constraints...)
|
||||
}
|
||||
d.constraints = append(d.constraints, evictionConstraintsForMinPodAge(args.MinPodAge)...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyEffectivePodProtections configures the evictor with specified Pod protection.
|
||||
func applyEffectivePodProtections(d *DefaultEvictor, podProtections []PodProtection, handle frameworktypes.Handle) error {
|
||||
protectionMap := make(map[PodProtection]bool, len(podProtections))
|
||||
for _, protection := range podProtections {
|
||||
protectionMap[protection] = true
|
||||
}
|
||||
|
||||
// Apply protections
|
||||
if err := applySystemCriticalPodsProtection(d, protectionMap, handle); err != nil {
|
||||
return err
|
||||
}
|
||||
applyFailedBarePodsProtection(d, protectionMap)
|
||||
applyLocalStoragePodsProtection(d, protectionMap)
|
||||
applyDaemonSetPodsProtection(d, protectionMap)
|
||||
applyPvcPodsProtection(d, protectionMap)
|
||||
applyPodsWithoutPDBProtection(d, protectionMap, handle)
|
||||
applyPodsWithResourceClaimsProtection(d, protectionMap)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func applyFailedBarePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[FailedBarePods]
|
||||
if !isProtectionEnabled {
|
||||
d.logger.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
// Enable evictFailedBarePods to evict bare pods in failed phase
|
||||
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
|
||||
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if len(ownerRefList) == 0 {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if len(podutil.OwnerRef(pod)) == 0 {
|
||||
return fmt.Errorf("pod does not have any ownerRefs")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !defaultEvictorArgs.EvictSystemCriticalPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.PriorityThreshold != nil && (defaultEvictorArgs.PriorityThreshold.Value != nil || len(defaultEvictorArgs.PriorityThreshold.Name) > 0) {
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), defaultEvictorArgs.PriorityThreshold)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get priority threshold: %v", err)
|
||||
}
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
})
|
||||
}
|
||||
} else {
|
||||
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
|
||||
func applySystemCriticalPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) error {
|
||||
isProtectionEnabled := protectionMap[SystemCriticalPods]
|
||||
if !isProtectionEnabled {
|
||||
d.logger.V(1).Info("Warning: System critical pod protection is disabled. This could cause eviction of Kubernetes system pods.")
|
||||
return nil
|
||||
}
|
||||
if !defaultEvictorArgs.EvictLocalStoragePods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithLocalStorage(pod) {
|
||||
return fmt.Errorf("pod has local storage and descheduler is not configured with evictLocalStoragePods")
|
||||
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsCriticalPriorityPod(pod) {
|
||||
return fmt.Errorf("pod has system critical priority and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
priorityThreshold := d.args.PriorityThreshold
|
||||
if priorityThreshold != nil && (priorityThreshold.Value != nil || len(priorityThreshold.Name) > 0) {
|
||||
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), handle.ClientSet(), priorityThreshold)
|
||||
if err != nil {
|
||||
d.logger.Error(err, "failed to get priority threshold")
|
||||
return err
|
||||
}
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if !IsPodEvictableBasedOnPriority(pod, thresholdPriority) {
|
||||
return fmt.Errorf("pod has higher priority than specified priority class threshold")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if !defaultEvictorArgs.EvictDaemonSetPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func applyLocalStoragePodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithLocalStorage]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithLocalStorage(pod) {
|
||||
return fmt.Errorf("pod has local storage and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func applyDaemonSetPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[DaemonSetPods]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
ownerRefList := podutil.OwnerRef(pod)
|
||||
if utils.IsDaemonsetPod(ownerRefList) {
|
||||
return fmt.Errorf("pod is related to daemonset and descheduler is not configured with evictDaemonSetPods")
|
||||
return fmt.Errorf("daemonset pods are protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if defaultEvictorArgs.IgnorePvcPods {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
}
|
||||
|
||||
func applyPvcPodsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithPVC]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithPVC(pod) {
|
||||
return fmt.Errorf("pod has a PVC and descheduler is configured to ignore PVC pods")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
selector, err := metav1.LabelSelectorAsSelector(defaultEvictorArgs.LabelSelector)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get selector from label selector")
|
||||
}
|
||||
if defaultEvictorArgs.LabelSelector != nil && !selector.Empty() {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if !selector.Matches(labels.Set(pod.Labels)) {
|
||||
return fmt.Errorf("pod labels do not match the labelSelector filter in the policy parameter")
|
||||
return fmt.Errorf("pod with PVC is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.MinReplicas > 1 {
|
||||
indexName := "metadata.ownerReferences"
|
||||
indexer, err := getPodIndexerByOwnerRefs(indexName, handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if len(pod.OwnerReferences) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(pod.OwnerReferences) > 1 {
|
||||
klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
|
||||
ownerRef := pod.OwnerReferences[0]
|
||||
objs, err := indexer.ByIndex(indexName, string(ownerRef.UID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list pods for minReplicas filter in the policy parameter")
|
||||
}
|
||||
|
||||
if uint(len(objs)) < defaultEvictorArgs.MinReplicas {
|
||||
return fmt.Errorf("owner has %d replicas which is less than minReplicas of %d", len(objs), defaultEvictorArgs.MinReplicas)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.MinPodAge != nil {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
if pod.Status.StartTime == nil || time.Since(pod.Status.StartTime.Time) < defaultEvictorArgs.MinPodAge.Duration {
|
||||
return fmt.Errorf("pod age is not older than MinPodAge: %s seconds", defaultEvictorArgs.MinPodAge.String())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if defaultEvictorArgs.IgnorePodsWithoutPDB {
|
||||
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
|
||||
func applyPodsWithoutPDBProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool, handle frameworktypes.Handle) {
|
||||
isProtectionEnabled := protectionMap[PodsWithoutPDB]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
|
||||
}
|
||||
if !hasPdb {
|
||||
return fmt.Errorf("no PodDisruptionBudget found for pod")
|
||||
return fmt.Errorf("pod does not have a PodDisruptionBudget and is protected against eviction")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
func applyPodsWithResourceClaimsProtection(d *DefaultEvictor, protectionMap map[PodProtection]bool) {
|
||||
isProtectionEnabled := protectionMap[PodsWithResourceClaims]
|
||||
if isProtectionEnabled {
|
||||
d.constraints = append(d.constraints, func(pod *v1.Pod) error {
|
||||
if utils.IsPodWithResourceClaims(pod) {
|
||||
return fmt.Errorf("pod has ResourceClaims and descheduler is configured to protect ResourceClaims pods")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// getEffectivePodProtections determines which policies are currently active.
|
||||
// It supports both new-style (PodProtections) and legacy-style flags.
|
||||
func getEffectivePodProtections(args *DefaultEvictorArgs) []PodProtection {
|
||||
// determine whether to use PodProtections config
|
||||
useNewConfig := len(args.PodProtections.DefaultDisabled) > 0 || len(args.PodProtections.ExtraEnabled) > 0
|
||||
|
||||
if !useNewConfig {
|
||||
// fall back to the Deprecated config
|
||||
return legacyGetPodProtections(args)
|
||||
}
|
||||
|
||||
// effective is the final list of active protection.
|
||||
effective := make([]PodProtection, 0)
|
||||
effective = append(effective, defaultPodProtections...)
|
||||
|
||||
// Remove PodProtections that are in the DefaultDisabled list.
|
||||
effective = slices.DeleteFunc(effective, func(protection PodProtection) bool {
|
||||
return slices.Contains(args.PodProtections.DefaultDisabled, protection)
|
||||
})
|
||||
|
||||
// Add extra enabled in PodProtections
|
||||
effective = append(effective, args.PodProtections.ExtraEnabled...)
|
||||
|
||||
return effective
|
||||
}
|
||||
|
||||
// legacyGetPodProtections returns protections using deprecated boolean flags.
|
||||
func legacyGetPodProtections(args *DefaultEvictorArgs) []PodProtection {
|
||||
var protections []PodProtection
|
||||
|
||||
// defaultDisabled
|
||||
if !args.EvictLocalStoragePods {
|
||||
protections = append(protections, PodsWithLocalStorage)
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
protections = append(protections, DaemonSetPods)
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
protections = append(protections, SystemCriticalPods)
|
||||
}
|
||||
if !args.EvictFailedBarePods {
|
||||
protections = append(protections, FailedBarePods)
|
||||
}
|
||||
|
||||
// extraEnabled
|
||||
if args.IgnorePvcPods {
|
||||
protections = append(protections, PodsWithPVC)
|
||||
}
|
||||
if args.IgnorePodsWithoutPDB {
|
||||
protections = append(protections, PodsWithoutPDB)
|
||||
}
|
||||
return protections
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
@@ -218,14 +306,15 @@ func (d *DefaultEvictor) Name() string {
|
||||
}
|
||||
|
||||
func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.PreEvictionFilterExtensionPoint)
|
||||
if d.args.NodeFit {
|
||||
nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
logger.Error(err, "unable to list ready nodes", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
|
||||
klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
logger.Info("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
@@ -234,12 +323,17 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||
logger := d.logger.WithValues("ExtensionPoint", frameworktypes.FilterExtensionPoint)
|
||||
checkErrs := []error{}
|
||||
|
||||
if HaveEvictAnnotation(pod) {
|
||||
return true
|
||||
}
|
||||
|
||||
if d.args.NoEvictionPolicy == MandatoryNoEvictionPolicy && evictionutils.HaveNoEvictionAnnotation(pod) {
|
||||
return false
|
||||
}
|
||||
|
||||
if utils.IsMirrorPod(pod) {
|
||||
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
|
||||
}
|
||||
@@ -259,7 +353,7 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
|
||||
}
|
||||
|
||||
if len(checkErrs) > 0 {
|
||||
klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
|
||||
logger.V(4).Info("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -16,19 +16,21 @@ package defaultevictor
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
evictionutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
frameworkfake "sigs.k8s.io/descheduler/pkg/framework/fake"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
@@ -44,12 +46,15 @@ type testCase struct {
|
||||
evictFailedBarePods bool
|
||||
evictLocalStoragePods bool
|
||||
evictSystemCriticalPods bool
|
||||
ignorePvcPods bool
|
||||
priorityThreshold *int32
|
||||
nodeFit bool
|
||||
minReplicas uint
|
||||
minPodAge *metav1.Duration
|
||||
result bool
|
||||
ignorePodsWithoutPDB bool
|
||||
podProtections PodProtections
|
||||
noEvictionPolicy NoEvictionPolicy
|
||||
}
|
||||
|
||||
func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
@@ -89,10 +94,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
nodeFit: true,
|
||||
}, {
|
||||
description: "Pod with correct tolerations running on normal node, all other nodes tainted",
|
||||
pods: []*v1.Pod{
|
||||
@@ -127,10 +129,8 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with incorrect node selector",
|
||||
pods: []*v1.Pod{
|
||||
@@ -153,10 +153,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
nodeFit: true,
|
||||
}, {
|
||||
description: "Pod with correct node selector",
|
||||
pods: []*v1.Pod{
|
||||
@@ -179,10 +176,8 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with correct node selector, but only available node doesn't have enough CPU",
|
||||
pods: []*v1.Pod{
|
||||
@@ -205,10 +200,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
nodeFit: true,
|
||||
}, {
|
||||
description: "Pod with correct node selector, and one node has enough memory",
|
||||
pods: []*v1.Pod{
|
||||
@@ -241,10 +233,8 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with correct node selector, but both nodes don't have enough memory",
|
||||
pods: []*v1.Pod{
|
||||
@@ -277,10 +267,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
nodeFit: true,
|
||||
}, {
|
||||
description: "Pod with incorrect node selector, but nodefit false, should still be evicted",
|
||||
pods: []*v1.Pod{
|
||||
@@ -303,10 +290,7 @@ func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: false,
|
||||
result: true,
|
||||
result: true,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -348,14 +332,13 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.Status.Phase = v1.PodFailed
|
||||
}),
|
||||
},
|
||||
evictFailedBarePods: false,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with no ownerRefs and evictFailedBarePods enabled",
|
||||
pods: []*v1.Pod{test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil)},
|
||||
evictFailedBarePods: true,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Failed pod eviction with no ownerRefs",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -364,70 +347,90 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
},
|
||||
evictFailedBarePods: true,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with normal ownerRefs",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with normal ownerRefs and " + evictPodAnnotationKey + " annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with normal ownerRefs and " + evictionutils.SoftNoEvictionAnnotationKey + " annotation (preference)",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{evictionutils.SoftNoEvictionAnnotationKey: ""}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with normal ownerRefs and " + evictionutils.SoftNoEvictionAnnotationKey + " annotation (mandatory)",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{evictionutils.SoftNoEvictionAnnotationKey: ""}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
noEvictionPolicy: MandatoryNoEvictionPolicy,
|
||||
result: false,
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with replicaSet ownerRefs",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with replicaSet ownerRefs and " + evictPodAnnotationKey + " annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p4", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with statefulSet ownerRefs",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p18", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Normal pod eviction with statefulSet ownerRefs and " + evictPodAnnotationKey + " annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p19", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod not evicted because it is bound to a PV and evictLocalStoragePods = false",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p5", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -445,10 +448,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p6", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -466,14 +467,14 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: true,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
evictLocalStoragePods: true,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
@@ -488,10 +489,9 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod not evicted because it is part of a daemonSet",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -499,21 +499,18 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod not evicted because it is a mirror poddsa",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -521,22 +518,19 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
pod.Annotations[evictPodAnnotationKey] = "true"
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod not evicted because it has system critical priority",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -545,10 +539,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -556,14 +548,13 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||
evictPodAnnotationKey: "true",
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod not evicted because it has a priority higher than the configured priority threshold",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -571,24 +562,21 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
}, {
|
||||
priorityThreshold: &lowPriority,
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -597,23 +585,23 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -621,24 +609,24 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Annotations = map[string]string{evictPodAnnotationKey: "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "Pod with no tolerations running on normal node, all other nodes tainted, no PreEvictionFilter, should ignore nodeFit",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -665,11 +653,10 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "minReplicas of 2, owner with 2 replicas, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -683,7 +670,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "minReplicas of 3, owner with 2 replicas, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -696,8 +684,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}),
|
||||
},
|
||||
minReplicas: 3,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "minReplicas of 2, multiple owners, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -710,7 +698,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
},
|
||||
minReplicas: 2,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "minPodAge of 50, pod created 10 minutes ago, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -720,8 +709,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}),
|
||||
},
|
||||
minPodAge: &minPodAge,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "minPodAge of 50, pod created 60 minutes ago, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -732,7 +721,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
},
|
||||
minPodAge: &minPodAge,
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "nil minPodAge, pod created 60 minutes ago, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -742,7 +732,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}),
|
||||
},
|
||||
result: true,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "ignorePodsWithoutPDB, pod with no PDBs, no eviction",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -753,8 +744,8 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
}),
|
||||
},
|
||||
ignorePodsWithoutPDB: true,
|
||||
result: false,
|
||||
}, {
|
||||
},
|
||||
{
|
||||
description: "ignorePodsWithoutPDB, pod with PDBs, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
|
||||
@@ -770,6 +761,124 @@ func TestDefaultEvictorFilter(t *testing.T) {
|
||||
ignorePodsWithoutPDB: true,
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "ignorePvcPods is set, pod with PVC, not evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "pvc", VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
ignorePvcPods: true,
|
||||
},
|
||||
{
|
||||
description: "ignorePvcPods is not set, pod with PVC, evicts",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "pvc", VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod with local storage is evicted because 'PodsWithLocalStorage' is in DefaultDisabled",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p18", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "local-storage", VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
podProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage},
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "DaemonSet pod is evicted because 'DaemonSetPods' is in DefaultDisabled",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p19", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
Kind: "DaemonSet",
|
||||
Name: "daemonset-test",
|
||||
UID: "daemonset-uid",
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
podProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{DaemonSetPods},
|
||||
},
|
||||
result: true,
|
||||
},
|
||||
{
|
||||
description: "Pod with PVC is not evicted because 'PodsWithPVC' is in ExtraEnabled",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p20", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "pvc", VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
podProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC},
|
||||
},
|
||||
result: false,
|
||||
},
|
||||
{
|
||||
description: "Pod without PDB is not evicted because 'PodsWithoutPDB' is in ExtraEnabled",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p21", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
podProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithoutPDB},
|
||||
},
|
||||
result: false,
|
||||
},
|
||||
{
|
||||
description: "Pod with ResourceClaims is not evicted because 'PodsWithResourceClaims' is in ExtraEnabled",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p20", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.ResourceClaims = []v1.PodResourceClaim{
|
||||
{
|
||||
Name: "test-claim",
|
||||
ResourceClaimName: ptr.To("test-resource-claim"),
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
podProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithResourceClaims},
|
||||
},
|
||||
result: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
@@ -825,7 +934,7 @@ func TestReinitialization(t *testing.T) {
|
||||
if !ok {
|
||||
t.Fatalf("Unable to initialize as a DefaultEvictor plugin")
|
||||
}
|
||||
_, err = New(defaultEvictor.args, defaultEvictor.handle)
|
||||
_, err = New(ctx, defaultEvictor.args, defaultEvictor.handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to reinitialize the plugin: %v", err)
|
||||
}
|
||||
@@ -862,7 +971,7 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
|
||||
defaultEvictorArgs := &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: test.evictLocalStoragePods,
|
||||
EvictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||
IgnorePvcPods: false,
|
||||
IgnorePvcPods: test.ignorePvcPods,
|
||||
EvictFailedBarePods: test.evictFailedBarePods,
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: test.priorityThreshold,
|
||||
@@ -871,9 +980,12 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
|
||||
MinReplicas: test.minReplicas,
|
||||
MinPodAge: test.minPodAge,
|
||||
IgnorePodsWithoutPDB: test.ignorePodsWithoutPDB,
|
||||
NoEvictionPolicy: test.noEvictionPolicy,
|
||||
PodProtections: test.podProtections,
|
||||
}
|
||||
|
||||
evictorPlugin, err := New(
|
||||
ctx,
|
||||
defaultEvictorArgs,
|
||||
&frameworkfake.HandleImpl{
|
||||
ClientsetImpl: fakeClient,
|
||||
@@ -886,3 +998,122 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
|
||||
|
||||
return evictorPlugin, nil
|
||||
}
|
||||
|
||||
func TestGetEffectivePodProtections_TableDriven(t *testing.T) {
|
||||
// Prepare the default set for easy reference
|
||||
defaultSet := defaultPodProtections
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args *DefaultEvictorArgs
|
||||
wantResult []PodProtection
|
||||
}{
|
||||
{
|
||||
name: "NewConfig_EmptyConfig_ReturnsDefault",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{},
|
||||
},
|
||||
},
|
||||
wantResult: defaultSet,
|
||||
},
|
||||
{
|
||||
name: "NewConfig_DisableOneDefault_ReturnsDefaultMinusOne",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage},
|
||||
ExtraEnabled: []PodProtection{},
|
||||
},
|
||||
},
|
||||
wantResult: []PodProtection{DaemonSetPods, SystemCriticalPods, FailedBarePods},
|
||||
},
|
||||
{
|
||||
name: "NewConfig_DisableMultipleDefaults_ReturnsDefaultMinusMultiple",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{DaemonSetPods, SystemCriticalPods},
|
||||
ExtraEnabled: []PodProtection{},
|
||||
},
|
||||
},
|
||||
wantResult: []PodProtection{PodsWithLocalStorage, FailedBarePods},
|
||||
},
|
||||
{
|
||||
name: "NewConfig_EnableOneExtra_ReturnsDefaultPlusOne",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC},
|
||||
},
|
||||
},
|
||||
wantResult: append(defaultSet, PodsWithPVC),
|
||||
},
|
||||
{
|
||||
name: "NewConfig_EnableMultipleExtra_ReturnsDefaultPlusMultiple",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithoutPDB},
|
||||
},
|
||||
},
|
||||
wantResult: append(defaultSet, PodsWithPVC, PodsWithoutPDB),
|
||||
},
|
||||
{
|
||||
name: "NewConfig_DisableAndEnable_ReturnsModifiedSet",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{FailedBarePods, DaemonSetPods},
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC},
|
||||
},
|
||||
},
|
||||
wantResult: []PodProtection{PodsWithLocalStorage, SystemCriticalPods, PodsWithPVC},
|
||||
},
|
||||
{
|
||||
name: "NewConfig_EnableOneExtra(PodsWithResourceClaims)_ReturnsDefaultPlusOne",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{PodsWithResourceClaims},
|
||||
},
|
||||
},
|
||||
wantResult: append(defaultSet, PodsWithResourceClaims),
|
||||
},
|
||||
{
|
||||
name: "NewConfig_DisableAndEnable_ReturnsModifiedSet",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{FailedBarePods, DaemonSetPods},
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithResourceClaims},
|
||||
},
|
||||
},
|
||||
wantResult: []PodProtection{PodsWithLocalStorage, SystemCriticalPods, PodsWithPVC, PodsWithResourceClaims},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := getEffectivePodProtections(tt.args)
|
||||
|
||||
if !slicesEqualUnordered(tt.wantResult, got) {
|
||||
t.Errorf("getEffectivePodProtections() = %v, want %v", got, tt.wantResult)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func slicesEqualUnordered(expected, actual []PodProtection) bool {
|
||||
if len(expected) != len(actual) {
|
||||
return false
|
||||
}
|
||||
for _, exp := range expected {
|
||||
if !slices.Contains(actual, exp) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for _, act := range actual {
|
||||
if !slices.Contains(expected, act) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -21,35 +21,7 @@ func addDefaultingFuncs(scheme *runtime.Scheme) error {
|
||||
return RegisterDefaults(scheme)
|
||||
}
|
||||
|
||||
// SetDefaults_DefaultEvictorArgs
|
||||
// TODO: the final default values would be discussed in community
|
||||
// SetDefaults_DefaultEvictorArgs sets the default values for the
|
||||
// DefaultEvictorArgs configuration.
|
||||
func SetDefaults_DefaultEvictorArgs(obj runtime.Object) {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
if args.NodeSelector == "" {
|
||||
args.NodeSelector = ""
|
||||
}
|
||||
if !args.EvictLocalStoragePods {
|
||||
args.EvictLocalStoragePods = false
|
||||
}
|
||||
if !args.EvictDaemonSetPods {
|
||||
args.EvictDaemonSetPods = false
|
||||
}
|
||||
if !args.EvictSystemCriticalPods {
|
||||
args.EvictSystemCriticalPods = false
|
||||
}
|
||||
if !args.IgnorePvcPods {
|
||||
args.IgnorePvcPods = false
|
||||
}
|
||||
if !args.EvictFailedBarePods {
|
||||
args.EvictFailedBarePods = false
|
||||
}
|
||||
if args.LabelSelector == nil {
|
||||
args.LabelSelector = nil
|
||||
}
|
||||
if args.PriorityThreshold == nil {
|
||||
args.PriorityThreshold = nil
|
||||
}
|
||||
if !args.NodeFit {
|
||||
args.NodeFit = false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
@@ -25,16 +25,101 @@ import (
|
||||
type DefaultEvictorArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
|
||||
NodeSelector string `json:"nodeSelector,omitempty"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
|
||||
PriorityThreshold *api.PriorityThreshold `json:"priorityThreshold,omitempty"`
|
||||
NodeFit bool `json:"nodeFit,omitempty"`
|
||||
MinReplicas uint `json:"minReplicas,omitempty"`
|
||||
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
|
||||
NoEvictionPolicy NoEvictionPolicy `json:"noEvictionPolicy,omitempty"`
|
||||
|
||||
// PodProtections holds the list of enabled and disabled protection policies.
|
||||
// Users can selectively disable certain default protection rules or enable extra ones.
|
||||
PodProtections PodProtections `json:"podProtections,omitempty"`
|
||||
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "PodsWithLocalStorage" instead.
|
||||
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "DaemonSetPods" instead.
|
||||
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "SystemCriticalPods" instead.
|
||||
EvictSystemCriticalPods bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
// Deprecated: Use ExtraPodProtection with "PodsWithPVC" instead.
|
||||
IgnorePvcPods bool `json:"ignorePvcPods,omitempty"`
|
||||
// Deprecated: Use ExtraPodProtection with "PodsWithoutPDB" instead.
|
||||
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
|
||||
// Deprecated: Use DisabledDefaultPodProtection with "FailedBarePods" instead.
|
||||
EvictFailedBarePods bool `json:"evictFailedBarePods,omitempty"`
|
||||
}
|
||||
|
||||
// PodProtection defines the protection policy for a pod.
|
||||
type PodProtection string
|
||||
|
||||
const (
|
||||
PodsWithLocalStorage PodProtection = "PodsWithLocalStorage"
|
||||
DaemonSetPods PodProtection = "DaemonSetPods"
|
||||
SystemCriticalPods PodProtection = "SystemCriticalPods"
|
||||
FailedBarePods PodProtection = "FailedBarePods"
|
||||
PodsWithPVC PodProtection = "PodsWithPVC"
|
||||
PodsWithoutPDB PodProtection = "PodsWithoutPDB"
|
||||
PodsWithResourceClaims PodProtection = "PodsWithResourceClaims"
|
||||
)
|
||||
|
||||
// PodProtections holds the list of enabled and disabled protection policies.
|
||||
// NOTE: The list of default enabled pod protection policies is subject to change in future versions.
|
||||
// +k8s:deepcopy-gen=true
|
||||
type PodProtections struct {
|
||||
// ExtraEnabled specifies additional protection policies that should be enabled.
|
||||
// Supports: PodsWithPVC, PodsWithoutPDB
|
||||
ExtraEnabled []PodProtection `json:"extraEnabled,omitempty"`
|
||||
|
||||
// DefaultDisabled specifies which default protection policies should be disabled.
|
||||
// Supports: PodsWithLocalStorage, DaemonSetPods, SystemCriticalPods, FailedBarePods
|
||||
DefaultDisabled []PodProtection `json:"defaultDisabled,omitempty"`
|
||||
}
|
||||
|
||||
// defaultPodProtections holds the list of protection policies that are enabled by default.
|
||||
// User can use the 'disabledDefaultPodProtections' evictor arguments (via PodProtections.DefaultDisabled)
|
||||
// to disable any of these default protections.
|
||||
//
|
||||
// The following four policies are included by default:
|
||||
// - PodsWithLocalStorage: Protects pods with local storage.
|
||||
// - DaemonSetPods: Protects DaemonSet managed pods.
|
||||
// - SystemCriticalPods: Protects system-critical pods.
|
||||
// - FailedBarePods: Protects failed bare pods (not part of any controller).
|
||||
var defaultPodProtections = []PodProtection{
|
||||
PodsWithLocalStorage,
|
||||
SystemCriticalPods,
|
||||
FailedBarePods,
|
||||
DaemonSetPods,
|
||||
}
|
||||
|
||||
// extraPodProtections holds a list of protection policies that the user can optionally enable
|
||||
// through the configuration (via PodProtections.ExtraEnabled). These policies are not enabled by default.
|
||||
//
|
||||
// Currently supported extra policies:
|
||||
// - PodsWithPVC: Protects pods using PersistentVolumeClaims.
|
||||
// - PodsWithoutPDB: Protects pods lacking a PodDisruptionBudget.
|
||||
// - PodsWithResourceClaims: Protects pods using ResourceClaims.
|
||||
var extraPodProtections = []PodProtection{
|
||||
PodsWithPVC,
|
||||
PodsWithoutPDB,
|
||||
PodsWithResourceClaims,
|
||||
}
|
||||
|
||||
// NoEvictionPolicy dictates whether a no-eviction policy is preferred or mandatory.
|
||||
// Needs to be used with caution as this will give users ability to protect their pods
|
||||
// from eviction. Which might work against enfored policies. E.g. plugins evicting pods
|
||||
// violating security policies.
|
||||
type NoEvictionPolicy string
|
||||
|
||||
const (
|
||||
// PreferredNoEvictionPolicy interprets the no-eviction policy as a preference.
|
||||
// Meaning the annotation will get ignored by the DefaultEvictor plugin.
|
||||
// Yet, plugins may optionally sort their pods based on the annotation
|
||||
// and focus on evicting pods that do not set the annotation.
|
||||
PreferredNoEvictionPolicy NoEvictionPolicy = "Preferred"
|
||||
|
||||
// MandatoryNoEvictionPolicy interprets the no-eviction policy as mandatory.
|
||||
// Every pod carying the annotation will get excluded from eviction.
|
||||
MandatoryNoEvictionPolicy NoEvictionPolicy = "Mandatory"
|
||||
)
|
||||
|
||||
@@ -15,22 +15,75 @@ package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"slices"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func ValidateDefaultEvictorArgs(obj runtime.Object) error {
|
||||
args := obj.(*DefaultEvictorArgs)
|
||||
|
||||
var allErrs []error
|
||||
if args.PriorityThreshold != nil && args.PriorityThreshold.Value != nil && len(args.PriorityThreshold.Name) > 0 {
|
||||
return fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set, got %v", args)
|
||||
allErrs = append(allErrs, fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"))
|
||||
}
|
||||
|
||||
if args.MinReplicas == 1 {
|
||||
klog.V(4).Info("DefaultEvictor minReplicas must be greater than 1 to check for min pods during eviction. This check will be ignored during eviction.")
|
||||
}
|
||||
|
||||
return nil
|
||||
if args.NoEvictionPolicy != "" {
|
||||
if args.NoEvictionPolicy != PreferredNoEvictionPolicy && args.NoEvictionPolicy != MandatoryNoEvictionPolicy {
|
||||
allErrs = append(allErrs, fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}))
|
||||
}
|
||||
}
|
||||
|
||||
// check if any deprecated fields are set to true
|
||||
hasDeprecatedFields := args.EvictLocalStoragePods || args.EvictDaemonSetPods ||
|
||||
args.EvictSystemCriticalPods || args.IgnorePvcPods ||
|
||||
args.EvictFailedBarePods || args.IgnorePodsWithoutPDB
|
||||
|
||||
// disallow mixing deprecated fields with PodProtections.ExtraEnabled and PodProtections.DefaultDisabled
|
||||
if hasDeprecatedFields && (len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0) {
|
||||
allErrs = append(allErrs, fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"))
|
||||
}
|
||||
|
||||
if len(args.PodProtections.ExtraEnabled) > 0 || len(args.PodProtections.DefaultDisabled) > 0 {
|
||||
|
||||
for _, policy := range args.PodProtections.ExtraEnabled {
|
||||
if !slices.Contains(extraPodProtections, policy) {
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in ExtraEnabled: %q. Valid options are: %v",
|
||||
string(policy), extraPodProtections))
|
||||
}
|
||||
}
|
||||
|
||||
for _, policy := range args.PodProtections.DefaultDisabled {
|
||||
if !slices.Contains(defaultPodProtections, policy) {
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid pod protection policy in DefaultDisabled: %q. Valid options are: %v",
|
||||
string(policy), defaultPodProtections))
|
||||
}
|
||||
}
|
||||
|
||||
if hasDuplicates(args.PodProtections.DefaultDisabled) {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.DefaultDisabled contains duplicate entries"))
|
||||
}
|
||||
|
||||
if hasDuplicates(args.PodProtections.ExtraEnabled) {
|
||||
allErrs = append(allErrs, fmt.Errorf("PodProtections.ExtraEnabled contains duplicate entries"))
|
||||
}
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
func hasDuplicates(slice []PodProtection) bool {
|
||||
seen := make(map[PodProtection]struct{}, len(slice))
|
||||
for _, item := range slice {
|
||||
if _, exists := seen[item]; exists {
|
||||
return true
|
||||
}
|
||||
seen[item] = struct{}{}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
215
pkg/framework/plugins/defaultevictor/validation_test.go
Normal file
215
pkg/framework/plugins/defaultevictor/validation_test.go
Normal file
@@ -0,0 +1,215 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultevictor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateDefaultEvictorArgs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args *DefaultEvictorArgs
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid priority",
|
||||
args: &DefaultEvictorArgs{
|
||||
PriorityThreshold: &api.PriorityThreshold{
|
||||
Value: utilptr.To[int32](1),
|
||||
Name: "priority-name",
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("priority threshold misconfigured, only one of priorityThreshold fields can be set"),
|
||||
},
|
||||
{
|
||||
name: "passing invalid no eviction policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
NoEvictionPolicy: "invalid-no-eviction-policy",
|
||||
},
|
||||
errInfo: fmt.Errorf("noEvictionPolicy accepts only %q values", []NoEvictionPolicy{PreferredNoEvictionPolicy, MandatoryNoEvictionPolicy}),
|
||||
},
|
||||
{
|
||||
name: "Valid configuration with no deprecated fields",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{},
|
||||
ExtraEnabled: []PodProtection{},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Valid configuration: both Disabled and ExtraEnabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{
|
||||
DaemonSetPods,
|
||||
PodsWithLocalStorage,
|
||||
},
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Valid configuration with ExtraEnabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Invalid configuration: Deprecated field used with Disabled",
|
||||
args: &DefaultEvictorArgs{
|
||||
EvictLocalStoragePods: true,
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{
|
||||
DaemonSetPods,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
name: "Invalid configuration: Deprecated field used with ExtraPodProtections",
|
||||
args: &DefaultEvictorArgs{
|
||||
EvictDaemonSetPods: true,
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{
|
||||
PodsWithPVC,
|
||||
},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("cannot use Deprecated fields alongside PodProtections.ExtraEnabled or PodProtections.DefaultDisabled"),
|
||||
},
|
||||
{
|
||||
name: "MinReplicas warning logged but no error",
|
||||
args: &DefaultEvictorArgs{
|
||||
MinReplicas: 1,
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Unknown policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{"InvalidPolicy"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "InvalidPolicy". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Misspelled policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{"PodsWithPVCC"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "PodsWithPVCC". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled: Policy from DefaultDisabled list",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{DaemonSetPods},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in ExtraEnabled: "DaemonSetPods". Valid options are: [PodsWithPVC PodsWithoutPDB PodsWithResourceClaims]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Unknown policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{"InvalidPolicy"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "InvalidPolicy". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Misspelled policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{"PodsWithLocalStorag"},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithLocalStorag". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled: Policy from ExtraEnabled list",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithPVC},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`invalid pod protection policy in DefaultDisabled: "PodsWithPVC". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods]`),
|
||||
},
|
||||
{
|
||||
name: "Invalid ExtraEnabled duplicate",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`PodProtections.ExtraEnabled contains duplicate entries`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled duplicate",
|
||||
args: &DefaultEvictorArgs{
|
||||
PodProtections: PodProtections{
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`PodProtections.DefaultDisabled contains duplicate entries`),
|
||||
},
|
||||
{
|
||||
name: "Invalid DefaultDisabled duplicate and Invalid ExtraEnabled duplicate and passing invalid no eviction policy",
|
||||
args: &DefaultEvictorArgs{
|
||||
NoEvictionPolicy: "invalid-no-eviction-policy",
|
||||
PodProtections: PodProtections{
|
||||
ExtraEnabled: []PodProtection{PodsWithPVC, PodsWithPVC},
|
||||
DefaultDisabled: []PodProtection{PodsWithLocalStorage, PodsWithLocalStorage, PodsWithoutPDB},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf(`[noEvictionPolicy accepts only ["Preferred" "Mandatory"] values, invalid pod protection policy in DefaultDisabled: "PodsWithoutPDB". Valid options are: [PodsWithLocalStorage SystemCriticalPods FailedBarePods DaemonSetPods], PodProtections.DefaultDisabled contains duplicate entries, PodProtections.ExtraEnabled contains duplicate entries]`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
validateErr := ValidateDefaultEvictorArgs(runtime.Object(testCase.args))
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -46,6 +46,7 @@ func (in *DefaultEvictorArgs) DeepCopyInto(out *DefaultEvictorArgs) {
|
||||
*out = new(v1.Duration)
|
||||
**out = **in
|
||||
}
|
||||
in.PodProtections.DeepCopyInto(&out.PodProtections)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -66,3 +67,29 @@ func (in *DefaultEvictorArgs) DeepCopyObject() runtime.Object {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodProtections) DeepCopyInto(out *PodProtections) {
	*out = *in
	// Slice fields share backing arrays after the shallow copy above, so each
	// non-nil slice gets a freshly allocated copy of its own.
	if in.ExtraEnabled != nil {
		in, out := &in.ExtraEnabled, &out.ExtraEnabled
		*out = make([]PodProtection, len(*in))
		copy(*out, *in)
	}
	if in.DefaultDisabled != nil {
		in, out := &in.DefaultDisabled, &out.DefaultDisabled
		*out = make([]PodProtection, len(*in))
		copy(*out, *in)
	}
	return
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProtections.
func (in *PodProtections) DeepCopy() *PodProtections {
	// A nil receiver deep-copies to nil rather than an empty value.
	if in == nil {
		return nil
	}
	out := new(PodProtections)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
||||
90
pkg/framework/plugins/example/README.md
Normal file
90
pkg/framework/plugins/example/README.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Descheduler Plugin: Example Implementation
|
||||
|
||||
This directory provides an example plugin for the Kubernetes Descheduler,
|
||||
demonstrating how to evict pods based on custom criteria. The plugin targets
|
||||
pods based on:
|
||||
|
||||
* **Name Regex:** Pods matching a specified regular expression.
|
||||
* **Age:** Pods older than a defined duration.
|
||||
* **Namespace:** Pods within or outside a given list of namespaces (inclusion
|
||||
or exclusion).
|
||||
|
||||
## Building and Integrating the Plugin
|
||||
|
||||
To incorporate this plugin into your Descheduler build, you must register it
|
||||
within the Descheduler's plugin registry. Follow these steps:
|
||||
|
||||
1. **Register the Plugin:**
|
||||
* Modify the `pkg/descheduler/setupplugins.go` file.
|
||||
* Add the following registration line to the end of the
|
||||
`RegisterDefaultPlugins()` function:
|
||||
|
||||
```go
|
||||
pluginregistry.Register(
|
||||
example.PluginName,
|
||||
example.New,
|
||||
&example.Example{},
|
||||
&example.ExampleArgs{},
|
||||
example.ValidateExampleArgs,
|
||||
example.SetDefaults_Example,
|
||||
registry,
|
||||
)
|
||||
```
|
||||
|
||||
2. **Generate Code:**
|
||||
* If you modify the plugin's code, execute `make gen` before rebuilding the
|
||||
Descheduler. This ensures generated code is up-to-date.
|
||||
|
||||
3. **Rebuild the Descheduler:**
|
||||
* Build the descheduler with your changes.
|
||||
|
||||
## Plugin Configuration
|
||||
|
||||
Configure the plugin's behavior using the Descheduler's policy configuration.
|
||||
Here's an example:
|
||||
|
||||
```yaml
|
||||
apiVersion: descheduler/v1alpha2
|
||||
kind: DeschedulerPolicy
|
||||
profiles:
|
||||
- name: LifecycleAndUtilization
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- Example
|
||||
pluginConfig:
|
||||
- name: Example
|
||||
args:
|
||||
regex: ^descheduler-test.*$
|
||||
maxAge: 3m
|
||||
namespaces:
|
||||
include:
|
||||
- default
|
||||
```
|
||||
|
||||
## Explanation
|
||||
|
||||
- `regex: ^descheduler-test.*$`: Evicts pods whose names match the regular
|
||||
expression `^descheduler-test.*$`.
|
||||
- `maxAge: 3m`: Evicts pods older than 3 minutes.
|
||||
- `namespaces.include: - default`: Evicts pods within the default namespace.
|
||||
|
||||
This configuration will cause the plugin to evict pods that meet all three
|
||||
criteria: matching the `regex`, exceeding the `maxAge`, and residing in the
|
||||
specified namespace.
|
||||
|
||||
## Notes
|
||||
|
||||
- This plugin is configured through the `ExampleArgs` struct, which defines the
|
||||
plugin's parameters.
|
||||
- Plugins must implement a function to validate and another to set the default
|
||||
values for their `Args` struct.
|
||||
- The fields in the `ExampleArgs` struct reflect directly into the
|
||||
`DeschedulerPolicy` configuration.
|
||||
- Plugins must comply with the `DeschedulePlugin` interface to be registered
|
||||
with the Descheduler.
|
||||
- The main functionality of the plugin is implemented in the `Deschedule()`
|
||||
method, which is called by the Descheduler when the plugin is executed.
|
||||
- A good amount of descheduling logic can be achieved by means of filters.
|
||||
- Whenever a change in the Plugin's configuration is made, the developer should
  regenerate the code by running `make gen`.
|
||||
36
pkg/framework/plugins/example/defaults.go
Normal file
36
pkg/framework/plugins/example/defaults.go
Normal file
@@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// addDefaultingFuncs registers the generated defaulting functions
// (RegisterDefaults) into the scheme; it is itself hooked into the scheme
// builder in register.go.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
|
||||
|
||||
// SetDefaults_Example sets the default arguments for the Example plugin. On
|
||||
// this case we set the default regex to match only empty strings (this should
|
||||
// not ever match anything). The default maximum age for pods is set to 5
|
||||
// minutes.
|
||||
func SetDefaults_Example(obj runtime.Object) {
|
||||
args := obj.(*ExampleArgs)
|
||||
if args.Regex == "" {
|
||||
args.Regex = "^$"
|
||||
}
|
||||
if args.MaxAge == "" {
|
||||
args.MaxAge = "5m"
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,9 @@
|
||||
/*
|
||||
Copyright 2014 Google Inc. All rights reserved.
|
||||
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
@@ -14,5 +11,6 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package fuzz is a library for populating go objects with random values.
|
||||
package fuzz
|
||||
// +k8s:defaulter-gen=TypeMeta
|
||||
|
||||
package example
|
||||
186
pkg/framework/plugins/example/example.go
Normal file
186
pkg/framework/plugins/example/example.go
Normal file
@@ -0,0 +1,186 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
fwtypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
)
|
||||
|
||||
// PluginName is used when registering the plugin. You need to choose a unique
// name across all plugins. This name is used to identify the plugin config in
// the descheduler policy.
const PluginName = "Example"

// We need to ensure that the plugin struct complies with the DeschedulePlugin
// interface. This prevents unexpected changes that may render this type
// incompatible.
var _ fwtypes.DeschedulePlugin = &Example{}

// Example is our plugin (implementing the DeschedulePlugin interface). This
// plugin will evict pods that match a regex and are older than a certain age.
type Example struct {
	logger    klog.Logger        // pre-tagged with the plugin name in New
	handle    fwtypes.Handle     // framework handle: evictor, pod assignment lookups
	args      *ExampleArgs       // plugin configuration arguments
	podFilter podutil.FilterFunc // namespace + evictability pre-filter built in New
}
||||
|
||||
// New builds a plugin instance from its arguments. Arguments are passed in as
|
||||
// a runtime.Object. Handle is used by plugins to retrieve a kubernetes client
|
||||
// set, evictor interface, shared informer factory and other instruments shared
|
||||
// across different plugins.
|
||||
func New(ctx context.Context, args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
|
||||
// make sure we are receiving the right argument type.
|
||||
exampleArgs, ok := args.(*ExampleArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
// we can use the included and excluded namespaces to filter the pods we want
|
||||
// to evict.
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if exampleArgs.Namespaces != nil {
|
||||
includedNamespaces = sets.New(exampleArgs.Namespaces.Include...)
|
||||
excludedNamespaces = sets.New(exampleArgs.Namespaces.Exclude...)
|
||||
}
|
||||
|
||||
// here we create a pod filter that will return only pods that can be
|
||||
// evicted (according to the evictor and inside the namespaces we want).
|
||||
// NOTE: here we could also add a function to filter out by the regex and
|
||||
// age but for sake of the example we are keeping it simple and filtering
|
||||
// those out in the Deschedule() function.
|
||||
podFilter, err := podutil.NewOptions().
|
||||
WithNamespaces(includedNamespaces).
|
||||
WithoutNamespaces(excludedNamespaces).
|
||||
WithFilter(
|
||||
podutil.WrapFilterFuncs(
|
||||
handle.Evictor().Filter,
|
||||
handle.Evictor().PreEvictionFilter,
|
||||
),
|
||||
).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
return &Example{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: exampleArgs,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name returns the plugin name (PluginName), which identifies this plugin in
// the descheduler policy configuration.
func (d *Example) Name() string {
	return PluginName
}
|
||||
|
||||
// Deschedule is the function where most of the logic around eviction is laid
// down. Here we go through all pods in all nodes and evict the ones that match
// the regex and are older than the maximum age. This function receives a list
// of nodes we need to process.
func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
	var podsToEvict []*v1.Pod
	logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", fwtypes.DescheduleExtensionPoint)
	logger.Info("Example plugin starting descheduling")

	// Regex and max age are stored as strings, so they are parsed on every
	// run; either one failing to parse aborts this descheduling cycle.
	re, err := regexp.Compile(d.args.Regex)
	if err != nil {
		err = fmt.Errorf("fail to compile regex: %w", err)
		return &fwtypes.Status{Err: err}
	}

	duration, err := time.ParseDuration(d.args.MaxAge)
	if err != nil {
		err = fmt.Errorf("fail to parse max age: %w", err)
		return &fwtypes.Status{Err: err}
	}

	// here we create an auxiliary filter to remove all pods that don't
	// match the provided regex or are still too young to be evicted.
	// This filter will be used when we list all pods on a node. This
	// filter here could have been part of the podFilter but we are
	// keeping it separate for the sake of the example.
	filter := func(pod *v1.Pod) bool {
		if !re.MatchString(pod.Name) {
			return false
		}
		deadline := pod.CreationTimestamp.Add(duration)
		return time.Now().After(deadline)
	}

	// go node by node getting all pods that we can evict.
	for _, node := range nodes {
		// ListPodsOnANode is a helper function that retrieves all pods (excluding Succeeded or Failed phases) filtering out the ones we can't evict.
		// We merge the default filters with the one we created above.
		//
		// The difference between ListPodsOnANode and ListAllPodsOnANode lies in their handling of Pods based on their phase:
		// - ListPodsOnANode excludes Pods that are in Succeeded or Failed phases because they do not occupy any resources.
		// - ListAllPodsOnANode does not exclude Pods based on their phase, listing all Pods regardless of their state.
		//
		// In this context, we prefer using ListPodsOnANode because:
		// 1. It ensures that only active Pods (not in Succeeded or Failed states) are considered for eviction.
		// 2. This helps avoid unnecessary processing of Pods that no longer consume resources.
		// 3. By applying an additional filter (d.podFilter and filter), we can further refine which Pods are eligible for eviction,
		//    ensuring that only Pods meeting specific criteria are selected.
		//
		// However, if you need to consider all Pods including those in Succeeded or Failed states for other purposes,
		// you should use ListAllPodsOnANode instead.
		pods, err := podutil.ListPodsOnANode(
			node.Name,
			d.handle.GetPodsAssignedToNodeFunc(),
			podutil.WrapFilterFuncs(d.podFilter, filter),
		)
		if err != nil {
			err = fmt.Errorf("fail to list pods: %w", err)
			return &fwtypes.Status{Err: err}
		}

		// as we have already filtered out pods that don't match the
		// regex or are too young we can simply add them all to the
		// eviction list.
		podsToEvict = append(podsToEvict, pods...)
	}

	// evict all the pods. A failed eviction is logged but does not abort
	// the loop; the remaining candidates are still processed.
	for _, pod := range podsToEvict {
		logger.Info("Example plugin evicting pod", "pod", klog.KObj(pod))
		opts := evictions.EvictOptions{StrategyName: PluginName}
		if err := d.handle.Evictor().Evict(ctx, pod, opts); err != nil {
			logger.Error(err, "unable to evict pod", "pod", klog.KObj(pod))
		}
	}

	logger.Info("Example plugin finished descheduling")
	return nil
}
|
||||
31
pkg/framework/plugins/example/register.go
Normal file
31
pkg/framework/plugins/example/register.go
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
var (
	// SchemeBuilder collects the functions that register this package's
	// types and defaulting functions into a runtime.Scheme.
	SchemeBuilder = runtime.NewSchemeBuilder()
	// localSchemeBuilder is the alias the generated files use to register
	// their functions.
	localSchemeBuilder = &SchemeBuilder
	// AddToScheme applies all collected registration functions to a scheme.
	AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addDefaultingFuncs)
}
|
||||
45
pkg/framework/plugins/example/types.go
Normal file
45
pkg/framework/plugins/example/types.go
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ExampleArgs holds a list of arguments used to configure the plugin. For this
// simple example we only care about a regex, a maximum age and possibly a list
// of namespaces to which we want to apply the descheduler. This plugin evicts
// pods that match a given regular expression and are older than the maximum
// allowed age. Most of the fields here were defined as strings so we can
// validate them somewhere else (show you a better implementation example).
type ExampleArgs struct {
	metav1.TypeMeta `json:",inline"`

	// Regex is a regular expression we use to match against pod names. If
	// the pod name matches the regex it will be evicted. This is expected
	// to be a valid regular expression (according to go's regexp package).
	// Defaults to "^$" (matches nothing) when left empty.
	Regex string `json:"regex"`

	// MaxAge is the maximum age a pod can have before it is considered for
	// eviction. This is expected to be a valid time.Duration. Defaults to
	// "5m" when left empty.
	MaxAge string `json:"maxAge"`

	// Namespaces allows us to filter on which namespaces we want to apply
	// the descheduler.
	Namespaces *api.Namespaces `json:"namespaces,omitempty"`
}
|
||||
45
pkg/framework/plugins/example/validation.go
Normal file
45
pkg/framework/plugins/example/validation.go
Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// ValidateExampleArgs validates if the plugin arguments are correct (we have
|
||||
// everything we need). On this case we only validate if we have a valid
|
||||
// regular expression and maximum age.
|
||||
func ValidateExampleArgs(obj runtime.Object) error {
|
||||
args := obj.(*ExampleArgs)
|
||||
if args.Regex == "" {
|
||||
return fmt.Errorf("regex argument must be set")
|
||||
}
|
||||
|
||||
if _, err := regexp.Compile(args.Regex); err != nil {
|
||||
return fmt.Errorf("invalid regex: %v", err)
|
||||
}
|
||||
|
||||
if _, err := time.ParseDuration(args.MaxAge); err != nil {
|
||||
return fmt.Errorf("invalid max age: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
57
pkg/framework/plugins/example/zz_generated.deepcopy.go
generated
Normal file
57
pkg/framework/plugins/example/zz_generated.deepcopy.go
generated
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExampleArgs) DeepCopyInto(out *ExampleArgs) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// Namespaces is a pointer, so the pointed-to struct is copied as well.
	if in.Namespaces != nil {
		in, out := &in.Namespaces, &out.Namespaces
		*out = new(api.Namespaces)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleArgs.
func (in *ExampleArgs) DeepCopy() *ExampleArgs {
	if in == nil {
		return nil
	}
	out := new(ExampleArgs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ExampleArgs) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
|
||||
33
pkg/framework/plugins/example/zz_generated.defaults.go
generated
Normal file
33
pkg/framework/plugins/example/zz_generated.defaults.go
generated
Normal file
@@ -0,0 +1,33 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by defaulter-gen. DO NOT EDIT.
|
||||
|
||||
package example
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	// No covering defaulters were generated for this package's types, so
	// there is nothing to register here.
	return nil
}
|
||||
@@ -0,0 +1,84 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package classifier
|
||||
|
||||
// Classifier is a function that classifies a resource usage based on a limit.
// The function should return true if the resource usage matches the classifier
// intent.
type Classifier[K comparable, V any] func(K, V, V) bool

// Comparer is a function that compares two objects. This function should return
// -1 if the first object is less than the second, 0 if they are equal, and 1 if
// the first object is greater than the second. Of course this is a simplification
// and any value between -1 and 1 can be returned.
type Comparer[V any] func(V, V) int

// Values is a map of values indexed by a comparable key. An example of this
// can be a list of resources indexed by a node name.
type Values[K comparable, V any] map[K]V

// Limits is a map of list of limits indexed by a comparable key. Each limit
// inside the list requires a classifier to evaluate.
type Limits[K comparable, V any] map[K][]V

// Classify is a function that classifies based on classifier functions. This
// function receives Values, a list of n Limits (indexed by name), and a list
// of n Classifiers. The classifier at n position is called to evaluate the
// limit at n position. The first classifier to return true will receive the
// value, at this point the loop will break and the next value will be
// evaluated. This function returns a slice of maps, each position in the
// returned slice correspond to one of the classifiers (e.g. if n limits
// and classifiers are provided, the returned slice will have n maps).
func Classify[K comparable, V any](
	values Values[K, V], limits Limits[K, V], classifiers ...Classifier[K, V],
) []map[K]V {
	result := make([]map[K]V, len(classifiers))
	for i := range classifiers {
		result[i] = make(map[K]V)
	}

	for index, usage := range values {
		for i, limit := range limits[index] {
			// Limits past the number of classifiers can never be evaluated
			// and the position only grows, so stop instead of skipping the
			// remaining entries one by one.
			if i >= len(classifiers) {
				break
			}
			if !classifiers[i](index, usage, limit) {
				continue
			}
			// First matching classifier claims the value for this key.
			result[i][index] = usage
			break
		}
	}

	return result
}

// ForMap is a function that returns a classifier that compares all values in a
// map. The function receives a Comparer function that is used to compare all
// the map values. The returned Classifier will return true only if the
// provided Comparer function returns a value less than 0 for all the values.
// Usages without a corresponding limit entry are ignored.
func ForMap[K, I comparable, V any, M ~map[I]V](cmp Comparer[V]) Classifier[K, M] {
	return func(_ K, usages, limits M) bool {
		for idx, usage := range usages {
			if limit, ok := limits[idx]; ok {
				if cmp(usage, limit) >= 0 {
					return false
				}
			}
		}
		return true
	}
}
|
||||
@@ -0,0 +1,739 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package classifier
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// TestClassifySimple exercises Classify with plain int usages and limits and
// two classifiers splitting nodes into under- and over-utilized buckets.
func TestClassifySimple(t *testing.T) {
	for _, tt := range []struct {
		name        string
		usage       map[string]int   // usage per node
		limits      map[string][]int // one limit per classifier, per node
		classifiers []Classifier[string, int]
		expected    []map[string]int // one result bucket per classifier
	}{
		{
			// No classifiers at all: the result is an empty slice.
			name:     "empty",
			usage:    map[string]int{},
			limits:   map[string][]int{},
			expected: []map[string]int{},
		},
		{
			name: "one under one over",
			usage: map[string]int{
				"node1": 2,
				"node2": 8,
			},
			limits: map[string][]int{
				"node1": {4, 6},
				"node2": {4, 6},
			},
			expected: []map[string]int{
				{"node1": 2},
				{"node2": 8},
			},
			classifiers: []Classifier[string, int]{
				func(_ string, usage, limit int) bool {
					return usage < limit
				},
				func(_ string, usage, limit int) bool {
					return usage > limit
				},
			},
		},
		{
			name: "randomly positioned over utilized",
			usage: map[string]int{
				"node1": 2,
				"node2": 8,
				"node3": 2,
				"node4": 8,
				"node5": 8,
				"node6": 2,
				"node7": 2,
				"node8": 8,
				"node9": 8,
			},
			limits: map[string][]int{
				"node1": {4, 6},
				"node2": {4, 6},
				"node3": {4, 6},
				"node4": {4, 6},
				"node5": {4, 6},
				"node6": {4, 6},
				"node7": {4, 6},
				"node8": {4, 6},
				"node9": {4, 6},
			},
			expected: []map[string]int{
				{
					"node1": 2,
					"node3": 2,
					"node6": 2,
					"node7": 2,
				},
				{
					"node2": 8,
					"node4": 8,
					"node5": 8,
					"node8": 8,
					"node9": 8,
				},
			},
			classifiers: []Classifier[string, int]{
				func(_ string, usage, limit int) bool {
					return usage < limit
				},
				func(_ string, usage, limit int) bool {
					return usage > limit
				},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			result := Classify(tt.usage, tt.limits, tt.classifiers...)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected result: %v", result)
			}
		})
	}
}
|
||||
|
||||
func TestClassify_pointers(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]map[v1.ResourceName]*resource.Quantity
|
||||
limits map[string][]map[v1.ResourceName]*resource.Quantity
|
||||
classifiers []Classifier[string, map[v1.ResourceName]*resource.Quantity]
|
||||
expected []map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
usage: map[string]map[v1.ResourceName]*resource.Quantity{},
|
||||
limits: map[string][]map[v1.ResourceName]*resource.Quantity{},
|
||||
expected: []map[string]map[v1.ResourceName]*resource.Quantity{},
|
||||
},
|
||||
{
|
||||
name: "single underutilized",
|
||||
usage: map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
},
|
||||
limits: map[string][]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
|
||||
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
|
||||
func(usage, limit *resource.Quantity) int {
|
||||
return usage.Cmp(*limit)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single underutilized and properly utilized",
|
||||
usage: map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("5")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("5Gi")),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("8")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("8Gi")),
|
||||
},
|
||||
},
|
||||
limits: map[string][]map[v1.ResourceName]*resource.Quantity{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]map[v1.ResourceName]*resource.Quantity{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
|
||||
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
|
||||
},
|
||||
},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
|
||||
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
|
||||
func(usage, limit *resource.Quantity) int {
|
||||
return usage.Cmp(*limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
|
||||
func(usage, limit *resource.Quantity) int {
|
||||
return limit.Cmp(*usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Classify(tt.usage, tt.limits, tt.classifiers...)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClassify(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]v1.ResourceList
|
||||
limits map[string][]v1.ResourceList
|
||||
classifiers []Classifier[string, v1.ResourceList]
|
||||
expected []map[string]v1.ResourceList
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
usage: map[string]v1.ResourceList{},
|
||||
limits: map[string][]v1.ResourceList{},
|
||||
expected: []map[string]v1.ResourceList{},
|
||||
},
|
||||
{
|
||||
name: "single underutilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "less classifiers than limits",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("5"),
|
||||
v1.ResourceMemory: resource.MustParse("5Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more classifiers than limits",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("20"),
|
||||
v1.ResourceMemory: resource.MustParse("20"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("50"),
|
||||
v1.ResourceMemory: resource.MustParse("50"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("80"),
|
||||
v1.ResourceMemory: resource.MustParse("80"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("30"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("30"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("30"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("20"),
|
||||
v1.ResourceMemory: resource.MustParse("20"),
|
||||
},
|
||||
},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single underutilized and properly utilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("5"),
|
||||
v1.ResourceMemory: resource.MustParse("5Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("16"),
|
||||
v1.ResourceMemory: resource.MustParse("16Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "single underutilized and multiple over utilized nodes",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
"node3": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
},
|
||||
{
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "over and under at the same time",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
"node2": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only memory over utilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("5"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: resource.MustParse("6"),
|
||||
v1.ResourceMemory: resource.MustParse("6Gi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{},
|
||||
{},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "randomly positioned over utilized",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node2": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node3": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node4": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node5": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node6": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node7": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node8": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node9": {v1.ResourceCPU: resource.MustParse("5")},
|
||||
},
|
||||
limits: map[string][]v1.ResourceList{
|
||||
"node1": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node2": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node3": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node4": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node5": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node6": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node7": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node8": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
"node9": {
|
||||
{v1.ResourceCPU: resource.MustParse("4")},
|
||||
{v1.ResourceCPU: resource.MustParse("6")},
|
||||
},
|
||||
},
|
||||
expected: []map[string]v1.ResourceList{
|
||||
{
|
||||
"node2": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node4": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
"node8": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
},
|
||||
{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node3": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node5": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node6": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
"node7": {v1.ResourceCPU: resource.MustParse("8")},
|
||||
},
|
||||
},
|
||||
classifiers: []Classifier[string, v1.ResourceList]{
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return usage.Cmp(limit)
|
||||
},
|
||||
),
|
||||
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
|
||||
func(usage, limit resource.Quantity) int {
|
||||
return limit.Cmp(usage)
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Classify(tt.usage, tt.limits, tt.classifiers...)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -19,9 +19,9 @@ package nodeutilization
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -29,164 +29,247 @@ import (
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const HighNodeUtilizationPluginName = "HighNodeUtilization"
|
||||
|
||||
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its plugin.
|
||||
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type HighNodeUtilization struct {
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
underutilizationCriteria []interface{}
|
||||
resourceNames []v1.ResourceName
|
||||
targetThresholds api.ResourceThresholds
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// this lines makes sure that HighNodeUtilization implements the BalancePlugin
|
||||
// interface.
|
||||
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
|
||||
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler
|
||||
// can schedule according to its plugin. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
type HighNodeUtilization struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
criteria []any
|
||||
resourceNames []v1.ResourceName
|
||||
highThresholds api.ResourceThresholds
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle.
|
||||
func NewHighNodeUtilization(
|
||||
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*HighNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
|
||||
return nil, fmt.Errorf(
|
||||
"want args to be of type HighNodeUtilizationArgs, got %T",
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", HighNodeUtilizationPluginName)
|
||||
|
||||
// this plugins worries only about thresholds but the nodeplugins
|
||||
// package was made to take two thresholds into account, one for low
|
||||
// and another for high usage. here we make sure we set the high
|
||||
// threshold to the maximum value for all resources for which we have a
|
||||
// threshold.
|
||||
highThresholds := make(api.ResourceThresholds)
|
||||
for rname := range args.Thresholds {
|
||||
highThresholds[rname] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
targetThresholds := make(api.ResourceThresholds)
|
||||
setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
|
||||
resourceNames := getResourceNames(targetThresholds)
|
||||
// get the resource names for which we have a threshold. this is
|
||||
// later used when determining if we are going to evict a pod.
|
||||
resourceThresholds := getResourceNames(args.Thresholds)
|
||||
|
||||
underutilizationCriteria := []interface{}{
|
||||
"CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
|
||||
"Mem", highNodeUtilizatioArgs.Thresholds[v1.ResourceMemory],
|
||||
"Pods", highNodeUtilizatioArgs.Thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range highNodeUtilizatioArgs.Thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(highNodeUtilizatioArgs.Thresholds[name]))
|
||||
}
|
||||
// by default we evict pods from the under utilized nodes even if they
|
||||
// don't define a request for a given threshold. this works most of the
|
||||
// times and there is an use case for it. When using the restrict mode
|
||||
// we evaluate if the pod has a request for any of the resources the
|
||||
// user has provided as threshold.
|
||||
filters := []podutil.FilterFunc{handle.Evictor().Filter}
|
||||
if slices.Contains(args.EvictionModes, EvictionModeOnlyThresholdingResources) {
|
||||
filters = append(
|
||||
filters,
|
||||
withResourceRequestForAny(resourceThresholds...),
|
||||
)
|
||||
}
|
||||
|
||||
podFilter, err := podutil.NewOptions().
|
||||
WithFilter(handle.Evictor().Filter).
|
||||
podFilter, err := podutil.
|
||||
NewOptions().
|
||||
WithFilter(podutil.WrapFilterFuncs(filters...)).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
// resourceNames is a list of all resource names this plugin cares
|
||||
// about. we care about the resources for which we have a threshold and
|
||||
// all we consider the basic resources (cpu, memory, pods).
|
||||
resourceNames := uniquifyResourceNames(
|
||||
append(
|
||||
resourceThresholds,
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
v1.ResourcePods,
|
||||
),
|
||||
)
|
||||
|
||||
return &HighNodeUtilization{
|
||||
handle: handle,
|
||||
args: highNodeUtilizatioArgs,
|
||||
resourceNames: resourceNames,
|
||||
targetThresholds: targetThresholds,
|
||||
underutilizationCriteria: underutilizationCriteria,
|
||||
podFilter: podFilter,
|
||||
usageClient: newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc()),
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: args,
|
||||
resourceNames: resourceNames,
|
||||
highThresholds: highThresholds,
|
||||
criteria: thresholdsToKeysAndValues(args.Thresholds),
|
||||
podFilter: podFilter,
|
||||
usageClient: newRequestedUsageClient(
|
||||
resourceNames,
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
// Name retrieves the plugin name.
|
||||
func (h *HighNodeUtilization) Name() string {
|
||||
return HighNodeUtilizationPluginName
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
// Balance holds the main logic of the plugin. It evicts pods from under
|
||||
// utilized nodes. The goal here is to concentrate pods in fewer nodes so that
|
||||
// less nodes are used.
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
if err := h.usageClient.sync(nodes); err != nil {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, h.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
if err := h.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
sourceNodes, highNodes := classifyNodes(
|
||||
getNodeUsage(nodes, h.usageClient),
|
||||
getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, false, h.usageClient),
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
// take a picture of the current state of the nodes, everything else
|
||||
// here is based on this snapshot.
|
||||
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, h.usageClient)
|
||||
capacities := referencedResourceListForNodesCapacity(nodes)
|
||||
|
||||
// node usages are not presented as percentages over the capacity.
|
||||
// we need to normalize them to be able to compare them with the
|
||||
// thresholds. thresholds are already provided by the user in
|
||||
// percentage.
|
||||
usage, thresholds := assessNodesUsagesAndStaticThresholds(
|
||||
nodesUsageMap, capacities, h.args.Thresholds, h.highThresholds,
|
||||
)
|
||||
|
||||
// classify nodes in two groups: underutilized and schedulable. we will
|
||||
// later try to move pods from the first group to the second.
|
||||
nodeGroups := classifier.Classify(
|
||||
usage, thresholds,
|
||||
// underutilized nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
return isNodeBelowThreshold(usage, threshold)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
|
||||
// schedulable nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
logger.V(2).Info(
|
||||
"Node is unschedulable",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
return false
|
||||
}
|
||||
return !isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
})
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// log message in one line
|
||||
klog.V(1).InfoS("Criteria for a node below target utilization", h.underutilizationCriteria...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))
|
||||
// the nodeplugin package works by means of NodeInfo structures. these
|
||||
// structures hold a series of information about the nodes. now that
|
||||
// we have classified the nodes, we can build the NodeInfo structures
|
||||
// for each group. NodeInfo structs carry usage and available resources
|
||||
// for each node.
|
||||
nodeInfos := make([][]NodeInfo, 2)
|
||||
category := []string{"underutilized", "overutilized"}
|
||||
for i := range nodeGroups {
|
||||
for nodeName := range nodeGroups[i] {
|
||||
logger.Info(
|
||||
"Node has been classified",
|
||||
"category", category[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: nodesMap[nodeName],
|
||||
usage: nodesUsageMap[nodeName],
|
||||
allPods: podListMap[nodeName],
|
||||
},
|
||||
available: capNodeCapacitiesToThreshold(
|
||||
nodesMap[nodeName],
|
||||
thresholds[nodeName][1],
|
||||
h.resourceNames,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(sourceNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||
return nil
|
||||
}
|
||||
if len(sourceNodes) <= h.args.NumberOfNodes {
|
||||
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", h.args.NumberOfNodes)
|
||||
return nil
|
||||
}
|
||||
if len(sourceNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
if len(highNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
|
||||
lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]
|
||||
|
||||
logger.V(1).Info("Criteria for a node below target utilization", h.criteria...)
|
||||
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
logger.V(1).Info(
|
||||
"No node is underutilized, nothing to do here, you might tune your thresholds further",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop if the total available usage has dropped to zero - no more pods can be scheduled
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
for name := range totalAvailableUsage {
|
||||
if totalAvailableUsage[name].CmpInt64(0) < 1 {
|
||||
if len(lowNodes) <= h.args.NumberOfNodes {
|
||||
logger.V(1).Info(
|
||||
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
|
||||
"underutilizedNodes", len(lowNodes),
|
||||
"numberOfNodes", h.args.NumberOfNodes,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
logger.V(1).Info("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(schedulableNodes) == 0 {
|
||||
logger.V(1).Info("No node is available to schedule the pods, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stops the eviction process if the total available capacity sage has
|
||||
// dropped to zero - no more pods can be scheduled. this will signalize
|
||||
// to stop if any of the available resources has dropped to zero.
|
||||
continueEvictionCond := func(_ NodeInfo, avail api.ReferencedResourceList) bool {
|
||||
for name := range avail {
|
||||
if avail[name].CmpInt64(0) < 1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Sort the nodes by the usage in ascending order
|
||||
sortNodesByUsage(sourceNodes, true)
|
||||
// sorts the nodes by the usage in ascending order.
|
||||
sortNodesByUsage(lowNodes, true)
|
||||
|
||||
evictPodsFromSourceNodes(
|
||||
ctx,
|
||||
h.args.EvictableNamespaces,
|
||||
sourceNodes,
|
||||
highNodes,
|
||||
lowNodes,
|
||||
schedulableNodes,
|
||||
h.handle.Evictor(),
|
||||
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
|
||||
h.podFilter,
|
||||
h.resourceNames,
|
||||
continueEvictionCond,
|
||||
h.usageClient,
|
||||
nil,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds) {
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
// Default targetThreshold resource values to 100
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
targetThresholds[name] = MaxResourcePercentage
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
evictionModes []EvictionMode
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
expectedPodsEvicted uint
|
||||
@@ -244,7 +245,7 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
},
|
||||
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.MakeBestEffortPod(pod)
|
||||
}),
|
||||
@@ -433,6 +434,53 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "with extended resource threshold and no extended resource pods",
|
||||
thresholds: api.ResourceThresholds{
|
||||
extendedResource: 40,
|
||||
},
|
||||
evictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 10)
|
||||
}),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 10)
|
||||
}),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 10)
|
||||
}),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
// pods on node1 have the extended resource
|
||||
// request set and they put the node in the
|
||||
// over utilization range.
|
||||
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
|
||||
}),
|
||||
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
|
||||
}),
|
||||
// pods in the other nodes must not be evicted
|
||||
// because they do not have the extended
|
||||
// resource defined in their requests.
|
||||
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -474,10 +522,14 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
},
|
||||
handle)
|
||||
plugin, err := NewHighNodeUtilization(
|
||||
ctx,
|
||||
&HighNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
EvictionModes: testCase.evictionModes,
|
||||
},
|
||||
handle,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
@@ -586,7 +638,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
|
||||
plugin, err := NewHighNodeUtilization(ctx, &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 40,
|
||||
},
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
@@ -29,147 +28,263 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const LowNodeUtilizationPluginName = "LowNodeUtilization"
|
||||
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type LowNodeUtilization struct {
|
||||
handle frameworktypes.Handle
|
||||
args *LowNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
underutilizationCriteria []interface{}
|
||||
overutilizationCriteria []interface{}
|
||||
resourceNames []v1.ResourceName
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// this lines makes sure that HighNodeUtilization implements the BalancePlugin
|
||||
// interface.
|
||||
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
|
||||
|
||||
// NewLowNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
|
||||
// LowNodeUtilization evicts pods from overutilized nodes to underutilized
|
||||
// nodes. Note that CPU/Memory requests are used to calculate nodes'
|
||||
// utilization and not the actual resource usage.
|
||||
type LowNodeUtilization struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *LowNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
underCriteria []any
|
||||
overCriteria []any
|
||||
resourceNames []v1.ResourceName
|
||||
extendedResourceNames []v1.ResourceName
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// NewLowNodeUtilization builds plugin from its arguments while passing a
|
||||
// handle. this plugin aims to move workload from overutilized nodes to
|
||||
// underutilized nodes.
|
||||
func NewLowNodeUtilization(
|
||||
ctx context.Context, genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*LowNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
|
||||
return nil, fmt.Errorf(
|
||||
"want args to be of type LowNodeUtilizationArgs, got %T",
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", LowNodeUtilizationPluginName)
|
||||
|
||||
setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
|
||||
// resourceNames holds a list of resources for which the user has
|
||||
// provided thresholds for. extendedResourceNames holds those as well
|
||||
// as cpu, memory and pods if no prometheus collection is used.
|
||||
resourceNames := getResourceNames(args.Thresholds)
|
||||
extendedResourceNames := resourceNames
|
||||
|
||||
underutilizationCriteria := []interface{}{
|
||||
"CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
|
||||
"Mem", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceMemory],
|
||||
"Pods", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range lowNodeUtilizationArgsArgs.Thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.Thresholds[name]))
|
||||
// if we are using prometheus we need to validate we have everything we
|
||||
// need. if we aren't then we need to make sure we are also collecting
|
||||
// data for cpu, memory and pods.
|
||||
metrics := args.MetricsUtilization
|
||||
if metrics != nil && metrics.Source == api.PrometheusMetrics {
|
||||
if err := validatePrometheusMetricsUtilization(args); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
extendedResourceNames = uniquifyResourceNames(
|
||||
append(
|
||||
resourceNames,
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
v1.ResourcePods,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
overutilizationCriteria := []interface{}{
|
||||
"CPU", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceCPU],
|
||||
"Mem", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceMemory],
|
||||
"Pods", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range lowNodeUtilizationArgsArgs.TargetThresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.TargetThresholds[name]))
|
||||
}
|
||||
}
|
||||
|
||||
podFilter, err := podutil.NewOptions().
|
||||
podFilter, err := podutil.
|
||||
NewOptions().
|
||||
WithFilter(handle.Evictor().Filter).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
resourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)
|
||||
|
||||
var usageClient usageClient
|
||||
if lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer {
|
||||
if handle.MetricsCollector() == nil {
|
||||
return nil, fmt.Errorf("metrics client not initialized")
|
||||
// this plugins supports different ways of collecting usage data. each
|
||||
// different way provides its own "usageClient". here we make sure we
|
||||
// have the correct one or an error is triggered. XXX MetricsServer is
|
||||
// deprecated, removed once dropped.
|
||||
var usageClient usageClient = newRequestedUsageClient(
|
||||
extendedResourceNames, handle.GetPodsAssignedToNodeFunc(),
|
||||
)
|
||||
if metrics != nil {
|
||||
usageClient, err = usageClientForMetrics(args, handle, extendedResourceNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
|
||||
} else {
|
||||
usageClient = newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc())
|
||||
}
|
||||
|
||||
return &LowNodeUtilization{
|
||||
handle: handle,
|
||||
args: lowNodeUtilizationArgsArgs,
|
||||
underutilizationCriteria: underutilizationCriteria,
|
||||
overutilizationCriteria: overutilizationCriteria,
|
||||
resourceNames: resourceNames,
|
||||
podFilter: podFilter,
|
||||
usageClient: usageClient,
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: args,
|
||||
underCriteria: thresholdsToKeysAndValues(args.Thresholds),
|
||||
overCriteria: thresholdsToKeysAndValues(args.TargetThresholds),
|
||||
resourceNames: resourceNames,
|
||||
extendedResourceNames: extendedResourceNames,
|
||||
podFilter: podFilter,
|
||||
usageClient: usageClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
// Name retrieves the plugin name.
|
||||
func (l *LowNodeUtilization) Name() string {
|
||||
return LowNodeUtilizationPluginName
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
// Balance holds the main logic of the plugin. It evicts pods from over
|
||||
// utilized nodes to under utilized nodes. The goal here is to evenly
|
||||
// distribute pods across nodes.
|
||||
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
if err := l.usageClient.sync(nodes); err != nil {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, l.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
if err := l.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
lowNodes, sourceNodes := classifyNodes(
|
||||
getNodeUsage(nodes, l.usageClient),
|
||||
getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.args.UseDeviationThresholds, l.usageClient),
|
||||
// The node has to be schedulable (to be able to move workload there)
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
|
||||
// starts by taking a snapshot ofthe nodes usage. we will use this
|
||||
// snapshot to assess the nodes usage and classify them as
|
||||
// underutilized or overutilized.
|
||||
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, l.usageClient)
|
||||
capacities := referencedResourceListForNodesCapacity(nodes)
|
||||
|
||||
// usage, by default, is exposed in absolute values. we need to normalize
|
||||
// them (convert them to percentages) to be able to compare them with the
|
||||
// user provided thresholds. thresholds are already provided in percentage
|
||||
// in the <0; 100> interval.
|
||||
var usage map[string]api.ResourceThresholds
|
||||
var thresholds map[string][]api.ResourceThresholds
|
||||
if l.args.UseDeviationThresholds {
|
||||
// here the thresholds provided by the user represent
|
||||
// deviations from the average so we need to treat them
|
||||
// differently. when calculating the average we only
|
||||
// need to consider the resources for which the user
|
||||
// has provided thresholds.
|
||||
usage, thresholds = assessNodesUsagesAndRelativeThresholds(
|
||||
filterResourceNames(nodesUsageMap, l.resourceNames),
|
||||
capacities,
|
||||
l.args.Thresholds,
|
||||
l.args.TargetThresholds,
|
||||
)
|
||||
} else {
|
||||
usage, thresholds = assessNodesUsagesAndStaticThresholds(
|
||||
nodesUsageMap,
|
||||
capacities,
|
||||
l.args.Thresholds,
|
||||
l.args.TargetThresholds,
|
||||
)
|
||||
}
|
||||
|
||||
// classify nodes in under and over utilized. we will later try to move
|
||||
// pods from the overutilized nodes to the underutilized ones.
|
||||
nodeGroups := classifier.Classify(
|
||||
usage, thresholds,
|
||||
// underutilization criteria processing. nodes that are
|
||||
// underutilized but aren't schedulable are ignored.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
logger.V(2).Info(
|
||||
"Node is unschedulable, thus not considered as underutilized",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
return false
|
||||
}
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
return isNodeBelowThreshold(usage, threshold)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
|
||||
// overutilization criteria evaluation.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
return isNodeAboveThreshold(usage, threshold)
|
||||
},
|
||||
)
|
||||
|
||||
// log message for nodes with low utilization
|
||||
klog.V(1).InfoS("Criteria for a node under utilization", l.underutilizationCriteria...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
// the nodeutilization package was designed to work with NodeInfo
|
||||
// structs. these structs holds information about how utilized a node
|
||||
// is. we need to go through the result of the classification and turn
|
||||
// it into NodeInfo structs.
|
||||
nodeInfos := make([][]NodeInfo, 2)
|
||||
categories := []string{"underutilized", "overutilized"}
|
||||
classifiedNodes := map[string]bool{}
|
||||
for i := range nodeGroups {
|
||||
for nodeName := range nodeGroups[i] {
|
||||
classifiedNodes[nodeName] = true
|
||||
|
||||
// log message for over utilized nodes
|
||||
klog.V(1).InfoS("Criteria for a node above target utilization", l.overutilizationCriteria...)
|
||||
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))
|
||||
logger.Info(
|
||||
"Node has been classified",
|
||||
"category", categories[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
|
||||
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: nodesMap[nodeName],
|
||||
usage: nodesUsageMap[nodeName],
|
||||
allPods: podListMap[nodeName],
|
||||
},
|
||||
available: capNodeCapacitiesToThreshold(
|
||||
nodesMap[nodeName],
|
||||
thresholds[nodeName][1],
|
||||
l.extendedResourceNames,
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// log nodes that are appropriately utilized.
|
||||
for nodeName := range nodesMap {
|
||||
if !classifiedNodes[nodeName] {
|
||||
logger.Info(
|
||||
"Node is appropriately utilized",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
lowNodes, highNodes := nodeInfos[0], nodeInfos[1]
|
||||
|
||||
// log messages for nodes with low and high utilization
|
||||
logger.V(1).Info("Criteria for a node under utilization", l.underCriteria...)
|
||||
logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
|
||||
logger.V(1).Info("Criteria for a node above target utilization", l.overCriteria...)
|
||||
logger.V(1).Info("Number of overutilized nodes", "totalNumber", len(highNodes))
|
||||
|
||||
if len(lowNodes) == 0 {
|
||||
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
|
||||
logger.V(1).Info(
|
||||
"No node is underutilized, nothing to do here, you might tune your thresholds further",
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) <= l.args.NumberOfNodes {
|
||||
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
|
||||
logger.V(1).Info(
|
||||
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
|
||||
"underutilizedNodes", len(lowNodes),
|
||||
"numberOfNodes", l.args.NumberOfNodes,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(lowNodes) == len(nodes) {
|
||||
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
|
||||
logger.V(1).Info("All nodes are underutilized, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(sourceNodes) == 0 {
|
||||
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
|
||||
if len(highNodes) == 0 {
|
||||
logger.V(1).Info("All nodes are under target utilization, nothing to do here")
|
||||
return nil
|
||||
}
|
||||
|
||||
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
|
||||
// this is a stop condition for the eviction process. we stop as soon
|
||||
// as the node usage drops below the threshold.
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
|
||||
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.available) {
|
||||
return false
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
@@ -181,52 +296,90 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
|
||||
return true
|
||||
}
|
||||
|
||||
// Sort the nodes by the usage in descending order
|
||||
sortNodesByUsage(sourceNodes, false)
|
||||
// sort the nodes by the usage in descending order
|
||||
sortNodesByUsage(highNodes, false)
|
||||
|
||||
var nodeLimit *uint
|
||||
if l.args.EvictionLimits != nil {
|
||||
nodeLimit = l.args.EvictionLimits.Node
|
||||
}
|
||||
|
||||
evictPodsFromSourceNodes(
|
||||
ctx,
|
||||
l.args.EvictableNamespaces,
|
||||
sourceNodes,
|
||||
highNodes,
|
||||
lowNodes,
|
||||
l.handle.Evictor(),
|
||||
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
|
||||
l.podFilter,
|
||||
l.resourceNames,
|
||||
l.extendedResourceNames,
|
||||
continueEvictionCond,
|
||||
l.usageClient,
|
||||
nodeLimit,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setDefaultForLNUThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) {
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourcePods] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
// validatePrometheusMetricsUtilization validates the Prometheus metrics
|
||||
// utilization. XXX this should be done way earlier than this.
|
||||
func validatePrometheusMetricsUtilization(args *LowNodeUtilizationArgs) error {
|
||||
if args.MetricsUtilization.Prometheus == nil {
|
||||
return fmt.Errorf("prometheus property is missing")
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourceCPU] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
if args.MetricsUtilization.Prometheus.Query == "" {
|
||||
return fmt.Errorf("prometheus query is missing")
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourceMemory] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
|
||||
uResourceNames := getResourceNames(args.Thresholds)
|
||||
oResourceNames := getResourceNames(args.TargetThresholds)
|
||||
if len(uResourceNames) != 1 || uResourceNames[0] != MetricResource {
|
||||
return fmt.Errorf(
|
||||
"thresholds are expected to specify a single instance of %q resource, got %v instead",
|
||||
MetricResource, uResourceNames,
|
||||
)
|
||||
}
|
||||
|
||||
if len(oResourceNames) != 1 || oResourceNames[0] != MetricResource {
|
||||
return fmt.Errorf(
|
||||
"targetThresholds are expected to specify a single instance of %q resource, got %v instead",
|
||||
MetricResource, oResourceNames,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// usageClientForMetrics returns the correct usage client based on the
|
||||
// metrics source. XXX MetricsServer is deprecated, removed once dropped.
|
||||
func usageClientForMetrics(
|
||||
args *LowNodeUtilizationArgs, handle frameworktypes.Handle, resources []v1.ResourceName,
|
||||
) (usageClient, error) {
|
||||
metrics := args.MetricsUtilization
|
||||
switch {
|
||||
case metrics.MetricsServer, metrics.Source == api.KubernetesMetrics:
|
||||
if handle.MetricsCollector() == nil {
|
||||
return nil, fmt.Errorf("metrics client not initialized")
|
||||
}
|
||||
return newActualUsageClient(
|
||||
resources,
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
handle.MetricsCollector(),
|
||||
), nil
|
||||
|
||||
case metrics.Source == api.PrometheusMetrics:
|
||||
if handle.PrometheusClient() == nil {
|
||||
return nil, fmt.Errorf("prometheus client not initialized")
|
||||
}
|
||||
return newPrometheusUsageClient(
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
handle.PrometheusClient(),
|
||||
metrics.Prometheus.Query,
|
||||
), nil
|
||||
case metrics.Source != "":
|
||||
return nil, fmt.Errorf("unrecognized metrics source")
|
||||
default:
|
||||
return nil, fmt.Errorf("metrics source is empty")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ import (
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
@@ -40,6 +41,8 @@ import (
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
func TestLowNodeUtilization(t *testing.T) {
|
||||
@@ -63,6 +66,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
expectedPodsWithMetricsEvicted uint
|
||||
evictedPods []string
|
||||
evictableNamespaces *api.Namespaces
|
||||
evictionLimits *api.EvictionLimits
|
||||
}{
|
||||
{
|
||||
name: "no evictable pods",
|
||||
@@ -712,6 +716,60 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "with extended resource in some of nodes with deviation",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 5,
|
||||
extendedResource: 10,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 5,
|
||||
extendedResource: 10,
|
||||
},
|
||||
useDeviationThresholds: true,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
|
||||
test.SetNodeExtendedResource(node, extendedResource, 8)
|
||||
}),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with extended resource.
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
|
||||
}),
|
||||
test.BuildTestPod("p2", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
// A pod with extended resource.
|
||||
test.SetRSOwnerRef(pod)
|
||||
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
|
||||
}),
|
||||
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 1,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "without priorities, but only other node is unschedulable",
|
||||
thresholds: api.ResourceThresholds{
|
||||
@@ -1018,6 +1076,79 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
evictedPods: []string{},
|
||||
},
|
||||
{
|
||||
name: "deviation thresholds and overevicting memory",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 5,
|
||||
v1.ResourcePods: 5,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 5,
|
||||
v1.ResourcePods: 5,
|
||||
},
|
||||
useDeviationThresholds: true,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
// totalcpuusage = 3600m, avgcpuusage = 3600/12000 = 0.3 => 30%
|
||||
// totalpodsusage = 9, avgpodsusage = 9/30 = 0.3 => 30%
|
||||
// n1 and n2 are fully memory utilized
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 375, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 375, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 375, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 375, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 375, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 375, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 375, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 4000, 3000),
|
||||
test.BuildNodeMetrics(n2NodeName, 4000, 3000),
|
||||
test.BuildNodeMetrics(n3NodeName, 4000, 3000),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 400, 375),
|
||||
test.BuildPodMetrics("p2", 400, 375),
|
||||
test.BuildPodMetrics("p3", 400, 375),
|
||||
test.BuildPodMetrics("p4", 400, 375),
|
||||
test.BuildPodMetrics("p5", 400, 375),
|
||||
test.BuildPodMetrics("p6", 400, 375),
|
||||
test.BuildPodMetrics("p7", 400, 375),
|
||||
test.BuildPodMetrics("p8", 400, 375),
|
||||
test.BuildPodMetrics("p9", 400, 3000),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
expectedPodsWithMetricsEvicted: 0,
|
||||
evictedPods: []string{},
|
||||
},
|
||||
{
|
||||
name: "without priorities different evictions for requested and actual resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
@@ -1122,6 +1253,72 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
expectedPodsEvicted: 3,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
},
|
||||
{
|
||||
name: "without priorities with node eviction limit",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
evictionLimits: &api.EvictionLimits{
|
||||
Node: ptr.To[uint](2),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
nodemetricses: []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics(n1NodeName, 3201, 0),
|
||||
test.BuildNodeMetrics(n2NodeName, 401, 0),
|
||||
test.BuildNodeMetrics(n3NodeName, 11, 0),
|
||||
},
|
||||
podmetricses: []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 401, 0),
|
||||
test.BuildPodMetrics("p2", 401, 0),
|
||||
test.BuildPodMetrics("p3", 401, 0),
|
||||
test.BuildPodMetrics("p4", 401, 0),
|
||||
test.BuildPodMetrics("p5", 401, 0),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
expectedPodsWithMetricsEvicted: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
@@ -1189,14 +1386,18 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
}
|
||||
handle.MetricsCollectorImpl = collector
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
var metricsUtilization *MetricsUtilization
|
||||
if metricsEnabled {
|
||||
metricsUtilization = &MetricsUtilization{Source: api.KubernetesMetrics}
|
||||
}
|
||||
|
||||
plugin, err := NewLowNodeUtilization(ctx, &LowNodeUtilizationArgs{
|
||||
Thresholds: tc.thresholds,
|
||||
TargetThresholds: tc.targetThresholds,
|
||||
UseDeviationThresholds: tc.useDeviationThresholds,
|
||||
EvictionLimits: tc.evictionLimits,
|
||||
EvictableNamespaces: tc.evictableNamespaces,
|
||||
MetricsUtilization: MetricsUtilization{
|
||||
MetricsServer: metricsEnabled,
|
||||
},
|
||||
MetricsUtilization: metricsUtilization,
|
||||
},
|
||||
handle)
|
||||
if err != nil {
|
||||
@@ -1350,7 +1551,7 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
|
||||
plugin, err := NewLowNodeUtilization(ctx, &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
@@ -1370,3 +1571,278 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func withLocalStorage(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}
|
||||
|
||||
func withCriticalPod(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}
|
||||
|
||||
func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
|
||||
n1NodeName := "n1"
|
||||
n2NodeName := "n2"
|
||||
n3NodeName := "n3"
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
samples model.Vector
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
expectedPodsEvicted uint
|
||||
evictedPods []string
|
||||
args *LowNodeUtilizationArgs
|
||||
}{
|
||||
{
|
||||
name: "with instance:node_cpu:rate:sum query",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
MetricResource: 30,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
MetricResource: 50,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &Prometheus{
|
||||
Query: "instance:node_cpu:rate:sum",
|
||||
},
|
||||
},
|
||||
},
|
||||
samples: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
|
||||
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
|
||||
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 1,
|
||||
},
|
||||
{
|
||||
name: "with instance:node_cpu:rate:sum query with more evictions",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
MetricResource: 30,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
MetricResource: 50,
|
||||
},
|
||||
EvictionLimits: &api.EvictionLimits{
|
||||
Node: ptr.To[uint](3),
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &Prometheus{
|
||||
Query: "instance:node_cpu:rate:sum",
|
||||
},
|
||||
},
|
||||
},
|
||||
samples: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
|
||||
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
|
||||
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "with instance:node_cpu:rate:sum query with deviation",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
MetricResource: 5,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
MetricResource: 5,
|
||||
},
|
||||
EvictionLimits: &api.EvictionLimits{
|
||||
Node: ptr.To[uint](2),
|
||||
},
|
||||
UseDeviationThresholds: true,
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &Prometheus{
|
||||
Query: "instance:node_cpu:rate:sum",
|
||||
},
|
||||
},
|
||||
},
|
||||
samples: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
|
||||
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
|
||||
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
},
|
||||
{
|
||||
name: "with instance:node_cpu:rate:sum query and deviation thresholds",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
UseDeviationThresholds: true,
|
||||
Thresholds: api.ResourceThresholds{MetricResource: 10},
|
||||
TargetThresholds: api.ResourceThresholds{MetricResource: 10},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
Prometheus: &Prometheus{
|
||||
Query: "instance:node_cpu:rate:sum",
|
||||
},
|
||||
},
|
||||
},
|
||||
samples: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", n1NodeName, 1),
|
||||
sample("instance:node_cpu:rate:sum", n2NodeName, 0.5),
|
||||
sample("instance:node_cpu:rate:sum", n3NodeName, 0),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range tc.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range tc.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
podsForEviction := make(map[string]struct{})
|
||||
for _, pod := range tc.evictedPods {
|
||||
podsForEviction[pod] = struct{}{}
|
||||
}
|
||||
|
||||
evictionFailed := false
|
||||
if len(tc.evictedPods) > 0 {
|
||||
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
getAction := action.(core.CreateAction)
|
||||
obj := getAction.GetObject()
|
||||
if eviction, ok := obj.(*policy.Eviction); ok {
|
||||
if _, exists := podsForEviction[eviction.Name]; exists {
|
||||
return true, obj, nil
|
||||
}
|
||||
evictionFailed = true
|
||||
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
}
|
||||
|
||||
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
handle.PrometheusClientImpl = &fakePromClient{
|
||||
result: tc.samples,
|
||||
dataType: model.ValVector,
|
||||
}
|
||||
plugin, err := NewLowNodeUtilization(ctx, tc.args, handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
status := plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
|
||||
if status != nil {
|
||||
t.Fatalf("Balance.err: %v", status.Err)
|
||||
}
|
||||
|
||||
podsEvicted := podEvictor.TotalEvicted()
|
||||
if expectedPodsEvicted != podsEvicted {
|
||||
t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
|
||||
}
|
||||
if evictionFailed {
|
||||
t.Errorf("Pod evictions failed unexpectedly")
|
||||
}
|
||||
}
|
||||
}
|
||||
t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,9 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sort"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -27,188 +29,122 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
|
||||
type NodeUsage struct {
|
||||
node *v1.Node
|
||||
usage map[v1.ResourceName]*resource.Quantity
|
||||
allPods []*v1.Pod
|
||||
}
|
||||
|
||||
type NodeThresholds struct {
|
||||
lowResourceThreshold map[v1.ResourceName]*resource.Quantity
|
||||
highResourceThreshold map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
type NodeInfo struct {
|
||||
NodeUsage
|
||||
thresholds NodeThresholds
|
||||
}
|
||||
|
||||
type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
|
||||
|
||||
// NodePodsMap is a set of (node, pods) pairs
|
||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||
// []NodeUsage is a snapshot, so allPods can not be read any time to avoid
|
||||
// breaking consistency between the node's actual usage and available pods.
|
||||
//
|
||||
// New data model:
|
||||
// - node usage: map[string]api.ReferencedResourceList
|
||||
// - thresholds: map[string]api.ReferencedResourceList
|
||||
// - all pods: map[string][]*v1.Pod
|
||||
//
|
||||
// After classification:
|
||||
// - each group will have its own (smaller) node usage and thresholds and
|
||||
// allPods.
|
||||
//
|
||||
// Both node usage and thresholds are needed to compute the remaining resources
|
||||
// that can be evicted/can accepted evicted pods.
|
||||
//
|
||||
// 1. translate node usages into percentages as float or int64 (how much
|
||||
// precision is lost?, maybe use BigInt?).
|
||||
// 2. produce thresholds (if they need to be computed, otherwise use user
|
||||
// provided, they are already in percentages).
|
||||
// 3. classify nodes into groups.
|
||||
// 4. produces a list of nodes (sorted as before) that have the node usage,
|
||||
// the threshold (only one this time) and the snapshottted pod list
|
||||
// present.
|
||||
//
|
||||
// Data wise
|
||||
// Produce separated maps for:
|
||||
// - nodes: map[string]*v1.Node
|
||||
// - node usage: map[string]api.ReferencedResourceList
|
||||
// - thresholds: map[string][]api.ReferencedResourceList
|
||||
// - pod list: map[string][]*v1.Pod
|
||||
//
|
||||
// Once the nodes are classified produce the original []NodeInfo so the code is
|
||||
// not that much changed (postponing further refactoring once it is needed).
|
||||
|
||||
const (
|
||||
// MetricResource is a special resource name we use to keep track of a
|
||||
// metric obtained from a third party entity.
|
||||
MetricResource = v1.ResourceName("MetricResource")
|
||||
// MinResourcePercentage is the minimum value of a resource's percentage
|
||||
MinResourcePercentage = 0
|
||||
// MaxResourcePercentage is the maximum value of a resource's percentage
|
||||
MaxResourcePercentage = 100
|
||||
)
|
||||
|
||||
func normalizePercentage(percent api.Percentage) api.Percentage {
|
||||
if percent > MaxResourcePercentage {
|
||||
return MaxResourcePercentage
|
||||
}
|
||||
if percent < MinResourcePercentage {
|
||||
return MinResourcePercentage
|
||||
}
|
||||
return percent
|
||||
// NodeUsage stores a node's info, pods on it, thresholds and its resource
|
||||
// usage.
|
||||
type NodeUsage struct {
|
||||
node *v1.Node
|
||||
usage api.ReferencedResourceList
|
||||
allPods []*v1.Pod
|
||||
}
|
||||
|
||||
func getNodeThresholds(
|
||||
nodes []*v1.Node,
|
||||
lowThreshold, highThreshold api.ResourceThresholds,
|
||||
resourceNames []v1.ResourceName,
|
||||
useDeviationThresholds bool,
|
||||
usageClient usageClient,
|
||||
) map[string]NodeThresholds {
|
||||
nodeThresholdsMap := map[string]NodeThresholds{}
|
||||
|
||||
averageResourceUsagePercent := api.ResourceThresholds{}
|
||||
if useDeviationThresholds {
|
||||
averageResourceUsagePercent = averageNodeBasicresources(nodes, usageClient)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
}
|
||||
|
||||
nodeThresholdsMap[node.Name] = NodeThresholds{
|
||||
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
|
||||
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
|
||||
}
|
||||
|
||||
for _, resourceName := range resourceNames {
|
||||
if useDeviationThresholds {
|
||||
cap := nodeCapacity[resourceName]
|
||||
if lowThreshold[resourceName] == MinResourcePercentage {
|
||||
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = &cap
|
||||
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = &cap
|
||||
} else {
|
||||
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]-lowThreshold[resourceName]))
|
||||
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]+highThreshold[resourceName]))
|
||||
}
|
||||
} else {
|
||||
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, lowThreshold[resourceName])
|
||||
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, highThreshold[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nodeThresholdsMap
|
||||
// NodeInfo is an entity we use to gather information about a given node. here
|
||||
// we have its resource usage as well as the amount of available resources.
|
||||
// we use this struct to carry information around and to make it easier to
|
||||
// process.
|
||||
type NodeInfo struct {
|
||||
NodeUsage
|
||||
available api.ReferencedResourceList
|
||||
}
|
||||
|
||||
func getNodeUsage(
|
||||
// continueEvictionCont is a function that determines if we should keep
|
||||
// evicting pods or not.
|
||||
type continueEvictionCond func(NodeInfo, api.ReferencedResourceList) bool
|
||||
|
||||
// getNodeUsageSnapshot separates the snapshot into easily accesible data
|
||||
// chunks so the node usage can be processed separately. returns a map of
|
||||
// nodes, a map of their usage and a map of their pods. maps are indexed
|
||||
// by node name.
|
||||
func getNodeUsageSnapshot(
|
||||
nodes []*v1.Node,
|
||||
usageClient usageClient,
|
||||
) []NodeUsage {
|
||||
var nodeUsageList []NodeUsage
|
||||
) (
|
||||
map[string]*v1.Node,
|
||||
map[string]api.ReferencedResourceList,
|
||||
map[string][]*v1.Pod,
|
||||
) {
|
||||
// XXX node usage needs to be kept in the original resource quantity
|
||||
// since converting to percentages and back is losing precision.
|
||||
nodesUsageMap := make(map[string]api.ReferencedResourceList)
|
||||
podListMap := make(map[string][]*v1.Pod)
|
||||
nodesMap := make(map[string]*v1.Node)
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeUsageList = append(nodeUsageList, NodeUsage{
|
||||
node: node,
|
||||
usage: usageClient.nodeUtilization(node.Name),
|
||||
allPods: usageClient.pods(node.Name),
|
||||
})
|
||||
nodesMap[node.Name] = node
|
||||
nodesUsageMap[node.Name] = usageClient.nodeUtilization(node.Name)
|
||||
podListMap[node.Name] = usageClient.pods(node.Name)
|
||||
}
|
||||
|
||||
return nodeUsageList
|
||||
return nodesMap, nodesUsageMap, podListMap
|
||||
}
|
||||
|
||||
func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceName, threshold api.Percentage) *resource.Quantity {
|
||||
defaultFormat := resource.DecimalSI
|
||||
if resourceName == v1.ResourceMemory {
|
||||
defaultFormat = resource.BinarySI
|
||||
// thresholdsToKeysAndValues converts a ResourceThresholds into a list of keys
|
||||
// and values. this is useful for logging.
|
||||
func thresholdsToKeysAndValues(thresholds api.ResourceThresholds) []any {
|
||||
result := []any{}
|
||||
for name, value := range thresholds {
|
||||
result = append(result, name, fmt.Sprintf("%.2f%%", value))
|
||||
}
|
||||
|
||||
resourceCapacityFraction := func(resourceNodeCapacity int64) int64 {
|
||||
// A threshold is in percentages but in <0;100> interval.
|
||||
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
|
||||
// Multiplying it with capacity will give fraction of the capacity corresponding to the given resource threshold in Quantity units.
|
||||
return int64(float64(threshold) * 0.01 * float64(resourceNodeCapacity))
|
||||
}
|
||||
|
||||
resourceCapacityQuantity := nodeCapacity.Name(resourceName, defaultFormat)
|
||||
|
||||
if resourceName == v1.ResourceCPU {
|
||||
return resource.NewMilliQuantity(resourceCapacityFraction(resourceCapacityQuantity.MilliValue()), defaultFormat)
|
||||
}
|
||||
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
|
||||
return result
|
||||
}
|
||||
|
||||
func roundTo2Decimals(percentage float64) float64 {
|
||||
return math.Round(percentage*100) / 100
|
||||
}
|
||||
|
||||
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
|
||||
nodeCapacity := nodeUsage.node.Status.Capacity
|
||||
if len(nodeUsage.node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = nodeUsage.node.Status.Allocatable
|
||||
}
|
||||
|
||||
resourceUsagePercentage := map[v1.ResourceName]float64{}
|
||||
for resourceName, resourceUsage := range nodeUsage.usage {
|
||||
cap := nodeCapacity[resourceName]
|
||||
if !cap.IsZero() {
|
||||
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.MilliValue()) / float64(cap.MilliValue())
|
||||
resourceUsagePercentage[resourceName] = roundTo2Decimals(resourceUsagePercentage[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
return resourceUsagePercentage
|
||||
}
|
||||
|
||||
// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
|
||||
// low and high thresholds, it is simply ignored.
|
||||
func classifyNodes(
|
||||
nodeUsages []NodeUsage,
|
||||
nodeThresholds map[string]NodeThresholds,
|
||||
lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool,
|
||||
) ([]NodeInfo, []NodeInfo) {
|
||||
lowNodes, highNodes := []NodeInfo{}, []NodeInfo{}
|
||||
|
||||
for _, nodeUsage := range nodeUsages {
|
||||
nodeInfo := NodeInfo{
|
||||
NodeUsage: nodeUsage,
|
||||
thresholds: nodeThresholds[nodeUsage.node.Name],
|
||||
}
|
||||
if lowThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
|
||||
klog.InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
lowNodes = append(lowNodes, nodeInfo)
|
||||
} else if highThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
|
||||
klog.InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
highNodes = append(highNodes, nodeInfo)
|
||||
} else {
|
||||
klog.InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
}
|
||||
}
|
||||
|
||||
return lowNodes, highNodes
|
||||
}
|
||||
|
||||
func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
|
||||
// log message in one line
|
||||
keysAndValues := []interface{}{}
|
||||
// usageToKeysAndValues converts a ReferencedResourceList into a list of
|
||||
// keys and values. this is useful for logging.
|
||||
func usageToKeysAndValues(usage api.ReferencedResourceList) []any {
|
||||
keysAndValues := []any{}
|
||||
if quantity, exists := usage[v1.ResourceCPU]; exists {
|
||||
keysAndValues = append(keysAndValues, "CPU", quantity.MilliValue())
|
||||
}
|
||||
@@ -220,15 +156,14 @@ func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interf
|
||||
}
|
||||
for name := range usage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), usage[name].Value())
|
||||
keysAndValues = append(keysAndValues, name, usage[name].Value())
|
||||
}
|
||||
}
|
||||
return keysAndValues
|
||||
}
|
||||
|
||||
// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on the node have priority, if not
|
||||
// evicts them based on QoS as fallback option.
|
||||
// TODO: @ravig Break this function into smaller functions.
|
||||
// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on
|
||||
// the node have priority, if not evicts them based on QoS as fallback option.
|
||||
func evictPodsFromSourceNodes(
|
||||
ctx context.Context,
|
||||
evictableNamespaces *api.Namespaces,
|
||||
@@ -239,49 +174,68 @@ func evictPodsFromSourceNodes(
|
||||
resourceNames []v1.ResourceName,
|
||||
continueEviction continueEvictionCond,
|
||||
usageClient usageClient,
|
||||
maxNoOfPodsToEvictPerNode *uint,
|
||||
) {
|
||||
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
|
||||
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
|
||||
for _, resourceName := range resourceNames {
|
||||
totalAvailableUsage[resourceName] = &resource.Quantity{}
|
||||
logger := klog.FromContext(ctx)
|
||||
available, err := assessAvailableResourceInNodes(destinationNodes, resourceNames)
|
||||
if err != nil {
|
||||
logger.Error(err, "unable to assess available resources in nodes")
|
||||
return
|
||||
}
|
||||
|
||||
taintsOfDestinationNodes := make(map[string][]v1.Taint, len(destinationNodes))
|
||||
logger.V(1).Info("Total capacity to be moved", usageToKeysAndValues(available)...)
|
||||
|
||||
destinationTaints := make(map[string][]v1.Taint, len(destinationNodes))
|
||||
for _, node := range destinationNodes {
|
||||
taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints
|
||||
|
||||
for _, name := range resourceNames {
|
||||
if _, exists := node.usage[name]; !exists {
|
||||
klog.Errorf("unable to find %q resource in node's %q usage, terminating eviction", name, node.node.Name)
|
||||
return
|
||||
}
|
||||
if _, ok := totalAvailableUsage[name]; !ok {
|
||||
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
totalAvailableUsage[name].Add(*node.thresholds.highResourceThreshold[name])
|
||||
totalAvailableUsage[name].Sub(*node.usage[name])
|
||||
}
|
||||
destinationTaints[node.node.Name] = node.node.Spec.Taints
|
||||
}
|
||||
|
||||
// log message in one line
|
||||
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(totalAvailableUsage)...)
|
||||
|
||||
for _, node := range sourceNodes {
|
||||
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
|
||||
logger.V(3).Info(
|
||||
"Evicting pods from node",
|
||||
"node", klog.KObj(node.node),
|
||||
"usage", node.usage,
|
||||
)
|
||||
|
||||
nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
|
||||
klog.V(2).InfoS("Pods on node", "node", klog.KObj(node.node), "allPods", len(node.allPods), "nonRemovablePods", len(nonRemovablePods), "removablePods", len(removablePods))
|
||||
logger.V(2).Info(
|
||||
"Pods on node",
|
||||
"node", klog.KObj(node.node),
|
||||
"allPods", len(node.allPods),
|
||||
"nonRemovablePods", len(nonRemovablePods),
|
||||
"removablePods", len(removablePods),
|
||||
)
|
||||
|
||||
if len(removablePods) == 0 {
|
||||
klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
|
||||
logger.V(1).Info(
|
||||
"No removable pods on node, try next node",
|
||||
"node", klog.KObj(node.node),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
|
||||
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
|
||||
logger.V(1).Info(
|
||||
"Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers",
|
||||
)
|
||||
|
||||
// sort the evictable Pods based on priority. This also sorts
|
||||
// them based on QoS. If there are multiple pods with same
|
||||
// priority, they are sorted based on QoS tiers.
|
||||
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
|
||||
err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction, usageClient)
|
||||
if err != nil {
|
||||
|
||||
if err := evictPods(
|
||||
ctx,
|
||||
evictableNamespaces,
|
||||
removablePods,
|
||||
node,
|
||||
available,
|
||||
destinationTaints,
|
||||
podEvictor,
|
||||
evictOptions,
|
||||
continueEviction,
|
||||
usageClient,
|
||||
maxNoOfPodsToEvictPerNode,
|
||||
); err != nil {
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return
|
||||
@@ -291,83 +245,136 @@ func evictPodsFromSourceNodes(
|
||||
}
|
||||
}
|
||||
|
||||
// evictPods keeps evicting pods until the continueEviction function returns
|
||||
// false or we can't or shouldn't evict any more pods. available node resources
|
||||
// are updated after each eviction.
|
||||
func evictPods(
|
||||
ctx context.Context,
|
||||
evictableNamespaces *api.Namespaces,
|
||||
inputPods []*v1.Pod,
|
||||
nodeInfo NodeInfo,
|
||||
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
|
||||
taintsOfLowNodes map[string][]v1.Taint,
|
||||
totalAvailableUsage api.ReferencedResourceList,
|
||||
destinationTaints map[string][]v1.Taint,
|
||||
podEvictor frameworktypes.Evictor,
|
||||
evictOptions evictions.EvictOptions,
|
||||
continueEviction continueEvictionCond,
|
||||
usageClient usageClient,
|
||||
maxNoOfPodsToEvictPerNode *uint,
|
||||
) error {
|
||||
logger := klog.FromContext(ctx)
|
||||
// preemptive check to see if we should continue evicting pods.
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// some namespaces can be excluded from the eviction process.
|
||||
var excludedNamespaces sets.Set[string]
|
||||
if evictableNamespaces != nil {
|
||||
excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
|
||||
}
|
||||
|
||||
if continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
for _, pod := range inputPods {
|
||||
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
|
||||
klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
var evictionCounter uint = 0
|
||||
for _, pod := range inputPods {
|
||||
if maxNoOfPodsToEvictPerNode != nil && evictionCounter >= *maxNoOfPodsToEvictPerNode {
|
||||
logger.V(3).Info(
|
||||
"Max number of evictions per node per plugin reached",
|
||||
"limit", *maxNoOfPodsToEvictPerNode,
|
||||
)
|
||||
break
|
||||
}
|
||||
|
||||
preEvictionFilterWithOptions, err := podutil.NewOptions().
|
||||
WithFilter(podEvictor.PreEvictionFilter).
|
||||
WithoutNamespaces(excludedNamespaces).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "could not build preEvictionFilter with namespace exclusion")
|
||||
continue
|
||||
}
|
||||
if !utils.PodToleratesTaints(pod, destinationTaints) {
|
||||
logger.V(3).Info(
|
||||
"Skipping eviction for pod, doesn't tolerate node taint",
|
||||
"pod", klog.KObj(pod),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if !preEvictionFilterWithOptions(pod) {
|
||||
continue
|
||||
}
|
||||
podUsage, err := usageClient.podUsage(pod)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
|
||||
continue
|
||||
}
|
||||
err = podEvictor.Evict(ctx, pod, evictOptions)
|
||||
if err == nil {
|
||||
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
|
||||
// verify if we can evict the pod based on the pod evictor
|
||||
// filter and on the excluded namespaces.
|
||||
preEvictionFilterWithOptions, err := podutil.
|
||||
NewOptions().
|
||||
WithFilter(podEvictor.PreEvictionFilter).
|
||||
WithoutNamespaces(excludedNamespaces).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
logger.Error(err, "could not build preEvictionFilter with namespace exclusion")
|
||||
continue
|
||||
}
|
||||
|
||||
for name := range totalAvailableUsage {
|
||||
if name == v1.ResourcePods {
|
||||
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
} else {
|
||||
nodeInfo.usage[name].Sub(*podUsage[name])
|
||||
totalAvailableUsage[name].Sub(*podUsage[name])
|
||||
}
|
||||
}
|
||||
if !preEvictionFilterWithOptions(pod) {
|
||||
continue
|
||||
}
|
||||
|
||||
keysAndValues := []interface{}{
|
||||
"node", nodeInfo.node.Name,
|
||||
}
|
||||
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
|
||||
klog.V(3).InfoS("Updated node usage", keysAndValues...)
|
||||
// check if pods can be still evicted
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
break
|
||||
}
|
||||
// in case podUsage does not support resource counting (e.g.
|
||||
// provided metric does not quantify pod resource utilization).
|
||||
unconstrainedResourceEviction := false
|
||||
podUsage, err := usageClient.podUsage(pod)
|
||||
if err != nil {
|
||||
if _, ok := err.(*notSupportedError); !ok {
|
||||
logger.Error(err,
|
||||
"unable to get pod usage", "pod", klog.KObj(pod),
|
||||
)
|
||||
continue
|
||||
}
|
||||
unconstrainedResourceEviction = true
|
||||
}
|
||||
|
||||
if err := podEvictor.Evict(ctx, pod, evictOptions); err != nil {
|
||||
switch err.(type) {
|
||||
case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
|
||||
return err
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if maxNoOfPodsToEvictPerNode == nil && unconstrainedResourceEviction {
|
||||
logger.V(3).Info("Currently, only a single pod eviction is allowed")
|
||||
break
|
||||
}
|
||||
|
||||
evictionCounter++
|
||||
logger.V(3).Info("Evicted pods", "pod", klog.KObj(pod))
|
||||
if unconstrainedResourceEviction {
|
||||
continue
|
||||
}
|
||||
|
||||
subtractPodUsageFromNodeAvailability(totalAvailableUsage, &nodeInfo, podUsage)
|
||||
|
||||
keysAndValues := []any{"node", nodeInfo.node.Name}
|
||||
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
|
||||
logger.V(3).Info("Updated node usage", keysAndValues...)
|
||||
|
||||
// make sure we should continue evicting pods.
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// subtractPodUsageFromNodeAvailability subtracts the pod usage from the node
|
||||
// available resources. this is done to keep track of the remaining resources
|
||||
// that can be used to move pods around.
|
||||
func subtractPodUsageFromNodeAvailability(
|
||||
available api.ReferencedResourceList,
|
||||
nodeInfo *NodeInfo,
|
||||
podUsage api.ReferencedResourceList,
|
||||
) {
|
||||
for name := range available {
|
||||
if name == v1.ResourcePods {
|
||||
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
available[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
continue
|
||||
}
|
||||
nodeInfo.usage[name].Sub(*podUsage[name])
|
||||
available[name].Sub(*podUsage[name])
|
||||
}
|
||||
}
|
||||
|
||||
// sortNodesByUsage sorts nodes based on usage according to the given plugin.
|
||||
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
@@ -400,7 +407,7 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
|
||||
|
||||
// isNodeAboveTargetUtilization checks if a node is overutilized
|
||||
// At least one resource has to be above the high threshold
|
||||
func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
|
||||
func isNodeAboveTargetUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
|
||||
for name, nodeValue := range usage.usage {
|
||||
// usage.highResourceThreshold[name] < nodeValue
|
||||
if threshold[name].Cmp(*nodeValue) == -1 {
|
||||
@@ -410,16 +417,25 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName
|
||||
return false
|
||||
}
|
||||
|
||||
// isNodeWithLowUtilization checks if a node is underutilized
|
||||
// All resources have to be below the low threshold
|
||||
func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
|
||||
for name, nodeValue := range usage.usage {
|
||||
// usage.lowResourceThreshold[name] < nodeValue
|
||||
if threshold[name].Cmp(*nodeValue) == -1 {
|
||||
// isNodeAboveThreshold checks if a node is over a threshold
|
||||
// At least one resource has to be above the threshold
|
||||
func isNodeAboveThreshold(usage, threshold api.ResourceThresholds) bool {
|
||||
for name := range threshold {
|
||||
if threshold[name] < usage[name] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isNodeBelowThreshold checks if a node is under a threshold
|
||||
// All resources have to be below the threshold
|
||||
func isNodeBelowThreshold(usage, threshold api.ResourceThresholds) bool {
|
||||
for name := range threshold {
|
||||
if threshold[name] < usage[name] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -432,6 +448,8 @@ func getResourceNames(thresholds api.ResourceThresholds) []v1.ResourceName {
|
||||
return resourceNames
|
||||
}
|
||||
|
||||
// classifyPods classify them in two lists: removable and non-removable.
|
||||
// Removable pods are those that can be evicted.
|
||||
func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {
|
||||
var nonRemovablePods, removablePods []*v1.Pod
|
||||
|
||||
@@ -446,27 +464,309 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
|
||||
return nonRemovablePods, removablePods
|
||||
}
|
||||
|
||||
func averageNodeBasicresources(nodes []*v1.Node, usageClient usageClient) api.ResourceThresholds {
|
||||
total := api.ResourceThresholds{}
|
||||
average := api.ResourceThresholds{}
|
||||
numberOfNodes := len(nodes)
|
||||
// assessNodesUsagesAndStaticThresholds converts the raw usage data into
|
||||
// percentage. Returns the usage (pct) and the thresholds (pct) for each
|
||||
// node.
|
||||
func assessNodesUsagesAndStaticThresholds(
|
||||
rawUsages, rawCapacities map[string]api.ReferencedResourceList,
|
||||
lowSpan, highSpan api.ResourceThresholds,
|
||||
) (map[string]api.ResourceThresholds, map[string][]api.ResourceThresholds) {
|
||||
// first we normalize the node usage from the raw data (Mi, Gi, etc)
|
||||
// into api.Percentage values.
|
||||
usage := normalizer.Normalize(
|
||||
rawUsages, rawCapacities, ResourceUsageToResourceThreshold,
|
||||
)
|
||||
|
||||
// we are not taking the average and applying deviations to it we can
|
||||
// simply replicate the same threshold across all nodes and return.
|
||||
thresholds := normalizer.Replicate(
|
||||
slices.Collect(maps.Keys(usage)),
|
||||
[]api.ResourceThresholds{lowSpan, highSpan},
|
||||
)
|
||||
return usage, thresholds
|
||||
}
|
||||
|
||||
// assessNodesUsagesAndRelativeThresholds converts the raw usage data into
|
||||
// percentage. Thresholds are calculated based on the average usage. Returns
|
||||
// the usage (pct) and the thresholds (pct) for each node.
|
||||
func assessNodesUsagesAndRelativeThresholds(
|
||||
rawUsages, rawCapacities map[string]api.ReferencedResourceList,
|
||||
lowSpan, highSpan api.ResourceThresholds,
|
||||
) (map[string]api.ResourceThresholds, map[string][]api.ResourceThresholds) {
|
||||
// first we normalize the node usage from the raw data (Mi, Gi, etc)
|
||||
// into api.Percentage values.
|
||||
usage := normalizer.Normalize(
|
||||
rawUsages, rawCapacities, ResourceUsageToResourceThreshold,
|
||||
)
|
||||
|
||||
// calculate the average usage.
|
||||
average := normalizer.Average(usage)
|
||||
klog.V(3).InfoS(
|
||||
"Assessed average usage",
|
||||
thresholdsToKeysAndValues(average)...,
|
||||
)
|
||||
|
||||
// decrease the provided threshold from the average to get the low
|
||||
// span. also make sure the resulting values are between 0 and 100.
|
||||
lowerThresholds := normalizer.Clamp(
|
||||
normalizer.Sum(average, normalizer.Negate(lowSpan)), 0, 100,
|
||||
)
|
||||
klog.V(3).InfoS(
|
||||
"Assessed thresholds for underutilized nodes",
|
||||
thresholdsToKeysAndValues(lowerThresholds)...,
|
||||
)
|
||||
|
||||
// increase the provided threshold from the average to get the high
|
||||
// span. also make sure the resulting values are between 0 and 100.
|
||||
higherThresholds := normalizer.Clamp(
|
||||
normalizer.Sum(average, highSpan), 0, 100,
|
||||
)
|
||||
klog.V(3).InfoS(
|
||||
"Assessed thresholds for overutilized nodes",
|
||||
thresholdsToKeysAndValues(higherThresholds)...,
|
||||
)
|
||||
|
||||
// replicate the same assessed thresholds to all nodes.
|
||||
thresholds := normalizer.Replicate(
|
||||
slices.Collect(maps.Keys(usage)),
|
||||
[]api.ResourceThresholds{lowerThresholds, higherThresholds},
|
||||
)
|
||||
|
||||
return usage, thresholds
|
||||
}
|
||||
|
||||
// referencedResourceListForNodesCapacity returns a ReferencedResourceList for
|
||||
// the capacity of a list of nodes. If allocatable resources are present, they
|
||||
// are used instead of capacity.
|
||||
func referencedResourceListForNodesCapacity(nodes []*v1.Node) map[string]api.ReferencedResourceList {
|
||||
capacities := map[string]api.ReferencedResourceList{}
|
||||
for _, node := range nodes {
|
||||
usage := usageClient.nodeUtilization(node.Name)
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
capacities[node.Name] = referencedResourceListForNodeCapacity(node)
|
||||
}
|
||||
return capacities
|
||||
}
|
||||
|
||||
// referencedResourceListForNodeCapacity returns a ReferencedResourceList for
|
||||
// the capacity of a node. If allocatable resources are present, they are used
|
||||
// instead of capacity.
|
||||
func referencedResourceListForNodeCapacity(node *v1.Node) api.ReferencedResourceList {
|
||||
capacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
capacity = node.Status.Allocatable
|
||||
}
|
||||
|
||||
referenced := api.ReferencedResourceList{}
|
||||
for name, quantity := range capacity {
|
||||
referenced[name] = ptr.To(quantity)
|
||||
}
|
||||
|
||||
// XXX the descheduler also manages monitoring queries that are
|
||||
// supposed to return a value representing a percentage of the
|
||||
// resource usage. In this case we need to provide a value for
|
||||
// the MetricResource, which is not present in the node capacity.
|
||||
referenced[MetricResource] = resource.NewQuantity(
|
||||
100, resource.DecimalSI,
|
||||
)
|
||||
|
||||
return referenced
|
||||
}
|
||||
|
||||
// ResourceUsage2ResourceThreshold is an implementation of a Normalizer that
|
||||
// converts a set of resource usages and totals into percentage. This function
|
||||
// operates on Quantity Value() for all the resources except CPU, where it uses
|
||||
// MilliValue().
|
||||
func ResourceUsageToResourceThreshold(
|
||||
usages, totals api.ReferencedResourceList,
|
||||
) api.ResourceThresholds {
|
||||
result := api.ResourceThresholds{}
|
||||
for rname, value := range usages {
|
||||
if value == nil || totals[rname] == nil {
|
||||
continue
|
||||
}
|
||||
for resource, value := range usage {
|
||||
nodeCapacityValue := nodeCapacity[resource]
|
||||
if resource == v1.ResourceCPU {
|
||||
total[resource] += api.Percentage(value.MilliValue()) / api.Percentage(nodeCapacityValue.MilliValue()) * 100.0
|
||||
} else {
|
||||
total[resource] += api.Percentage(value.Value()) / api.Percentage(nodeCapacityValue.Value()) * 100.0
|
||||
|
||||
total := totals[rname]
|
||||
used, capacity := value.Value(), total.Value()
|
||||
if rname == v1.ResourceCPU {
|
||||
used, capacity = value.MilliValue(), total.MilliValue()
|
||||
}
|
||||
|
||||
var percent float64
|
||||
if capacity > 0 {
|
||||
percent = float64(used) / float64(capacity) * 100
|
||||
}
|
||||
|
||||
result[rname] = api.Percentage(percent)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// uniquifyResourceNames returns a slice of resource names with duplicates
|
||||
// removed.
|
||||
func uniquifyResourceNames(resourceNames []v1.ResourceName) []v1.ResourceName {
|
||||
resourceNamesMap := map[v1.ResourceName]bool{
|
||||
v1.ResourceCPU: true,
|
||||
v1.ResourceMemory: true,
|
||||
v1.ResourcePods: true,
|
||||
}
|
||||
for _, resourceName := range resourceNames {
|
||||
resourceNamesMap[resourceName] = true
|
||||
}
|
||||
return slices.Collect(maps.Keys(resourceNamesMap))
|
||||
}
|
||||
|
||||
// filterResourceNamesFromNodeUsage removes from the node usage slice all keys
|
||||
// that are not present in the resourceNames slice.
|
||||
func filterResourceNames(
|
||||
from map[string]api.ReferencedResourceList, resourceNames []v1.ResourceName,
|
||||
) map[string]api.ReferencedResourceList {
|
||||
newNodeUsage := make(map[string]api.ReferencedResourceList)
|
||||
for nodeName, usage := range from {
|
||||
newNodeUsage[nodeName] = api.ReferencedResourceList{}
|
||||
for _, resourceName := range resourceNames {
|
||||
if _, exists := usage[resourceName]; exists {
|
||||
newNodeUsage[nodeName][resourceName] = usage[resourceName]
|
||||
}
|
||||
}
|
||||
}
|
||||
for resource, value := range total {
|
||||
average[resource] = value / api.Percentage(numberOfNodes)
|
||||
}
|
||||
return average
|
||||
return newNodeUsage
|
||||
}
|
||||
|
||||
// capNodeCapacitiesToThreshold caps the node capacities to the given
|
||||
// thresholds. if a threshold is not set for a resource, the full capacity is
|
||||
// returned.
|
||||
func capNodeCapacitiesToThreshold(
|
||||
node *v1.Node,
|
||||
thresholds api.ResourceThresholds,
|
||||
resourceNames []v1.ResourceName,
|
||||
) api.ReferencedResourceList {
|
||||
capped := api.ReferencedResourceList{}
|
||||
for _, resourceName := range resourceNames {
|
||||
capped[resourceName] = capNodeCapacityToThreshold(
|
||||
node, thresholds, resourceName,
|
||||
)
|
||||
}
|
||||
return capped
|
||||
}
|
||||
|
||||
// capNodeCapacityToThreshold caps the node capacity to the given threshold. if
|
||||
// no threshold is set for the resource, the full capacity is returned.
|
||||
func capNodeCapacityToThreshold(
|
||||
node *v1.Node, thresholds api.ResourceThresholds, resourceName v1.ResourceName,
|
||||
) *resource.Quantity {
|
||||
capacities := referencedResourceListForNodeCapacity(node)
|
||||
if _, ok := capacities[resourceName]; !ok {
|
||||
// if the node knows nothing about the resource we return a
|
||||
// zero capacity for it.
|
||||
return resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
|
||||
// if no threshold is set then we simply return the full capacity.
|
||||
if _, ok := thresholds[resourceName]; !ok {
|
||||
return capacities[resourceName]
|
||||
}
|
||||
|
||||
// now that we have a capacity and a threshold we need to do the math
|
||||
// to cap the former to the latter.
|
||||
quantity := capacities[resourceName]
|
||||
threshold := thresholds[resourceName]
|
||||
|
||||
// we have a different format for memory. all the other resources are
|
||||
// in the DecimalSI format.
|
||||
format := resource.DecimalSI
|
||||
if resourceName == v1.ResourceMemory {
|
||||
format = resource.BinarySI
|
||||
}
|
||||
|
||||
// this is what we use to cap the capacity. thresholds are expected to
|
||||
// be in the <0;100> interval.
|
||||
fraction := func(threshold api.Percentage, capacity int64) int64 {
|
||||
return int64(float64(threshold) * 0.01 * float64(capacity))
|
||||
}
|
||||
|
||||
// here we also vary a little bit. milli is used for cpu, all the rest
|
||||
// goes with the default.
|
||||
if resourceName == v1.ResourceCPU {
|
||||
return resource.NewMilliQuantity(
|
||||
fraction(threshold, quantity.MilliValue()),
|
||||
format,
|
||||
)
|
||||
}
|
||||
|
||||
return resource.NewQuantity(
|
||||
fraction(threshold, quantity.Value()),
|
||||
format,
|
||||
)
|
||||
}
|
||||
|
||||
// assessAvailableResourceInNodes computes the available resources in all the
|
||||
// nodes. this is done by summing up all the available resources in all the
|
||||
// nodes and then subtracting the usage from it.
|
||||
func assessAvailableResourceInNodes(
|
||||
nodes []NodeInfo, resources []v1.ResourceName,
|
||||
) (api.ReferencedResourceList, error) {
|
||||
// available holds a sum of all the resources that can be used to move
|
||||
// pods around. e.g. the sum of all available cpu and memory in all
|
||||
// cluster nodes.
|
||||
available := api.ReferencedResourceList{}
|
||||
for _, node := range nodes {
|
||||
for _, resourceName := range resources {
|
||||
if _, exists := node.usage[resourceName]; !exists {
|
||||
return nil, fmt.Errorf(
|
||||
"unable to find %s resource in node's %s usage, terminating eviction",
|
||||
resourceName, node.node.Name,
|
||||
)
|
||||
}
|
||||
|
||||
// XXX this should never happen. we better bail out
|
||||
// here than hard crash with a segfault.
|
||||
if node.usage[resourceName] == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"unable to find %s usage resources, terminating eviction",
|
||||
resourceName,
|
||||
)
|
||||
}
|
||||
|
||||
// keep the current usage around so we can subtract it
|
||||
// from the available resources.
|
||||
usage := *node.usage[resourceName]
|
||||
|
||||
// first time seeing this resource, initialize it.
|
||||
if _, ok := available[resourceName]; !ok {
|
||||
available[resourceName] = resource.NewQuantity(
|
||||
0, resource.DecimalSI,
|
||||
)
|
||||
}
|
||||
|
||||
// XXX this should never happen. we better bail out
|
||||
// here than hard crash with a segfault.
|
||||
if node.available[resourceName] == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"unable to find %s available resources, terminating eviction",
|
||||
resourceName,
|
||||
)
|
||||
}
|
||||
|
||||
// now we add the capacity and then subtract the usage.
|
||||
available[resourceName].Add(*node.available[resourceName])
|
||||
available[resourceName].Sub(usage)
|
||||
}
|
||||
}
|
||||
|
||||
return available, nil
|
||||
}
|
||||
|
||||
// withResourceRequestForAny returns a filter function that checks if a pod
|
||||
// has a resource request specified for any of the given resources names.
|
||||
func withResourceRequestForAny(names ...v1.ResourceName) pod.FilterFunc {
|
||||
return func(pod *v1.Pod) bool {
|
||||
all := append(pod.Spec.Containers, pod.Spec.InitContainers...)
|
||||
for _, name := range names {
|
||||
for _, container := range all {
|
||||
if _, ok := container.Resources.Requests[name]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,11 +18,16 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
)
|
||||
|
||||
func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
|
||||
@@ -55,44 +60,6 @@ var (
|
||||
extendedResource = v1.ResourceName("example.com/foo")
|
||||
)
|
||||
|
||||
func TestResourceUsagePercentages(t *testing.T) {
|
||||
resourceUsagePercentage := resourceUsagePercentages(NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
},
|
||||
})
|
||||
|
||||
expectedUsageInIntPercentage := map[v1.ResourceName]float64{
|
||||
v1.ResourceCPU: 63,
|
||||
v1.ResourceMemory: 90,
|
||||
v1.ResourcePods: 37,
|
||||
}
|
||||
|
||||
for resourceName, percentage := range expectedUsageInIntPercentage {
|
||||
if math.Floor(resourceUsagePercentage[resourceName]) != percentage {
|
||||
t.Errorf("Incorrect percentange computation, expected %v, got math.Floor(%v) instead", percentage, resourceUsagePercentage[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
|
||||
}
|
||||
|
||||
func TestSortNodesByUsage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -103,21 +70,21 @@ func TestSortNodesByUsage(t *testing.T) {
|
||||
name: "cpu memory pods",
|
||||
nodeInfoList: []NodeInfo{
|
||||
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
|
||||
@@ -130,17 +97,17 @@ func TestSortNodesByUsage(t *testing.T) {
|
||||
name: "memory",
|
||||
nodeInfoList: []NodeInfo{
|
||||
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
|
||||
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
|
||||
nodeInfo.usage = api.ReferencedResourceList{
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
}
|
||||
}),
|
||||
@@ -171,3 +138,442 @@ func TestSortNodesByUsage(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceUsageToResourceThreshold(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage api.ReferencedResourceList
|
||||
capacity api.ReferencedResourceList
|
||||
expected api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "10 percent",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{v1.ResourceCPU: 10},
|
||||
},
|
||||
{
|
||||
name: "zeroed out capacity",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{v1.ResourceCPU: 0},
|
||||
},
|
||||
{
|
||||
name: "non existing usage",
|
||||
usage: api.ReferencedResourceList{
|
||||
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
{
|
||||
name: "existing and non existing usage",
|
||||
usage: api.ReferencedResourceList{
|
||||
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{v1.ResourceCPU: 20},
|
||||
},
|
||||
{
|
||||
name: "nil usage",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: nil,
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
{
|
||||
name: "nil capacity",
|
||||
usage: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
},
|
||||
capacity: api.ReferencedResourceList{
|
||||
v1.ResourceCPU: nil,
|
||||
},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ResourceUsageToResourceThreshold(tt.usage, tt.capacity)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Errorf("Expected %v, got %v", tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
|
||||
result := api.ResourceThresholds{}
|
||||
for rname, value := range usages {
|
||||
total, ok := totals[rname]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
used, avail := value.Value(), total.Value()
|
||||
if rname == v1.ResourceCPU {
|
||||
used, avail = value.MilliValue(), total.MilliValue()
|
||||
}
|
||||
|
||||
pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
|
||||
result[rname] = api.Percentage(pct)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// This is a test for thresholds being defined as deviations from the average
// usage. This is expected to be a little longer test case. We are going to
// comment the steps to make it easier to follow.
func TestClassificationUsingDeviationThresholds(t *testing.T) {
	// These are the two thresholds defined by the user. These thresholds
	// mean that our low limit will be 5 pct points below the average and
	// the high limit will be 5 pct points above the average.
	userDefinedThresholds := map[string]api.ResourceThresholds{
		"low":  {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
		"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
	}

	// Create a fake total amount of resources for all nodes. We define
	// the total amount to 1000 for both memory and cpu. This is so we
	// can easily calculate (manually) the percentage of usages here.
	nodesTotal := normalizer.Replicate(
		[]string{"node1", "node2", "node3", "node4", "node5"},
		v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("1000"),
			v1.ResourceMemory: resource.MustParse("1000"),
		},
	)

	// Create a fake usage per server per resource. We are aiming to
	// have the average of these resources in 50%. When applying the
	// thresholds we should obtain the low threshold at 45% and the high
	// threshold at 55%.
	nodesUsage := map[string]v1.ResourceList{
		"node1": {
			v1.ResourceCPU:    resource.MustParse("100"),
			v1.ResourceMemory: resource.MustParse("100"),
		},
		"node2": {
			v1.ResourceCPU:    resource.MustParse("480"),
			v1.ResourceMemory: resource.MustParse("480"),
		},
		"node3": {
			v1.ResourceCPU:    resource.MustParse("520"),
			v1.ResourceMemory: resource.MustParse("520"),
		},
		"node4": {
			v1.ResourceCPU:    resource.MustParse("500"),
			v1.ResourceMemory: resource.MustParse("500"),
		},
		"node5": {
			v1.ResourceCPU:    resource.MustParse("900"),
			v1.ResourceMemory: resource.MustParse("900"),
		},
	}

	// Normalize the usage to percentages and then calculate the average
	// among all nodes.
	usage := normalizer.Normalize(nodesUsage, nodesTotal, ResourceListUsageNormalizer)
	average := normalizer.Average(usage)

	// Create the thresholds by first applying the deviations and then
	// replicating once for each node. Thresholds are supposed to be per
	// node even though the user provides them only once. This is by
	// design as it opens the possibility for further implementations of
	// thresholds per node.
	thresholds := normalizer.Replicate(
		[]string{"node1", "node2", "node3", "node4", "node5"},
		[]api.ResourceThresholds{
			normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
			normalizer.Sum(average, userDefinedThresholds["high"]),
		},
	)

	// Classify the nodes according to the thresholds. Nodes below the low
	// threshold (45%) are underutilized, nodes above the high threshold
	// (55%) are overutilized and nodes in between are properly utilized.
	result := classifier.Classify(
		usage, thresholds,
		// First classifier matches nodes whose usage is below the limit.
		classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
			func(usage, limit api.Percentage) int {
				return int(usage - limit)
			},
		),
		// Second classifier matches nodes whose usage is above the limit.
		classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
			func(usage, limit api.Percentage) int {
				return int(limit - usage)
			},
		),
	)

	// we expect the node1 to be underutilized (10%), node2, node3 and node4
	// to be properly utilized (48%, 52% and 50% respectively) and node5 to
	// be overutilized (90%).
	expected := []map[string]api.ResourceThresholds{
		{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
		{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
	}

	if !reflect.DeepEqual(result, expected) {
		t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
	}
}
|
||||
|
||||
// This is almost a copy of TestClassificationUsingDeviationThresholds but we
// are using pointers here. This is for making sure our generic types are in
// check. To understand this code better read the comments on
// TestClassificationUsingDeviationThresholds.
func TestUsingDeviationThresholdsWithPointers(t *testing.T) {
	// Low/high deviations of 5 pct points around the average usage.
	userDefinedThresholds := map[string]api.ResourceThresholds{
		"low":  {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
		"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
	}

	// Totals of 1000 per resource per node, here as *resource.Quantity.
	nodesTotal := normalizer.Replicate(
		[]string{"node1", "node2", "node3", "node4", "node5"},
		map[v1.ResourceName]*resource.Quantity{
			v1.ResourceCPU:    ptr.To(resource.MustParse("1000")),
			v1.ResourceMemory: ptr.To(resource.MustParse("1000")),
		},
	)

	// Per-node usages chosen so the overall average lands at 50%.
	nodesUsage := map[string]map[v1.ResourceName]*resource.Quantity{
		"node1": {
			v1.ResourceCPU:    ptr.To(resource.MustParse("100")),
			v1.ResourceMemory: ptr.To(resource.MustParse("100")),
		},
		"node2": {
			v1.ResourceCPU:    ptr.To(resource.MustParse("480")),
			v1.ResourceMemory: ptr.To(resource.MustParse("480")),
		},
		"node3": {
			v1.ResourceCPU:    ptr.To(resource.MustParse("520")),
			v1.ResourceMemory: ptr.To(resource.MustParse("520")),
		},
		"node4": {
			v1.ResourceCPU:    ptr.To(resource.MustParse("500")),
			v1.ResourceMemory: ptr.To(resource.MustParse("500")),
		},
		"node5": {
			v1.ResourceCPU:    ptr.To(resource.MustParse("900")),
			v1.ResourceMemory: ptr.To(resource.MustParse("900")),
		},
	}

	// Adapter that dereferences the pointer maps and delegates to the
	// value-based ResourceListUsageNormalizer.
	ptrNormalizer := func(
		usages, totals map[v1.ResourceName]*resource.Quantity,
	) api.ResourceThresholds {
		newUsages := v1.ResourceList{}
		for name, usage := range usages {
			newUsages[name] = *usage
		}
		newTotals := v1.ResourceList{}
		for name, total := range totals {
			newTotals[name] = *total
		}
		return ResourceListUsageNormalizer(newUsages, newTotals)
	}

	usage := normalizer.Normalize(nodesUsage, nodesTotal, ptrNormalizer)
	average := normalizer.Average(usage)

	// Per-node thresholds at average-5 (low) and average+5 (high).
	thresholds := normalizer.Replicate(
		[]string{"node1", "node2", "node3", "node4", "node5"},
		[]api.ResourceThresholds{
			normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
			normalizer.Sum(average, userDefinedThresholds["high"]),
		},
	)

	result := classifier.Classify(
		usage, thresholds,
		classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
			func(usage, limit api.Percentage) int {
				return int(usage - limit)
			},
		),
		classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
			func(usage, limit api.Percentage) int {
				return int(limit - usage)
			},
		),
	)

	// node1 (10%) is below the low limit, node5 (90%) is above the high one.
	expected := []map[string]api.ResourceThresholds{
		{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
		{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
	}

	if !reflect.DeepEqual(result, expected) {
		t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
	}
}
|
||||
|
||||
// TestNormalizeAndClassify runs the full pipeline end to end: absolute usages
// are normalized into percentages and then partitioned by a list of
// classifiers, one bucket per classifier.
func TestNormalizeAndClassify(t *testing.T) {
	for _, tt := range []struct {
		name        string
		usage       map[string]v1.ResourceList
		totals      map[string]v1.ResourceList
		thresholds  map[string][]api.ResourceThresholds
		expected    []map[string]api.ResourceThresholds
		classifiers []classifier.Classifier[string, api.ResourceThresholds]
	}{
		{
			name: "happy path test",
			usage: map[string]v1.ResourceList{
				"node1": {
					// underutilized on cpu and memory.
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("10"),
				},
				"node2": {
					// overutilized on cpu and memory.
					v1.ResourceCPU:    resource.MustParse("90"),
					v1.ResourceMemory: resource.MustParse("90"),
				},
				"node3": {
					// properly utilized on cpu and memory.
					v1.ResourceCPU:    resource.MustParse("50"),
					v1.ResourceMemory: resource.MustParse("50"),
				},
				"node4": {
					// underutilized on cpu and overutilized on memory.
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("90"),
				},
			},
			totals: normalizer.Replicate(
				[]string{"node1", "node2", "node3", "node4"},
				v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100"),
					v1.ResourceMemory: resource.MustParse("100"),
				},
			),
			thresholds: normalizer.Replicate(
				[]string{"node1", "node2", "node3", "node4"},
				[]api.ResourceThresholds{
					{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
					{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
				},
			),
			expected: []map[string]api.ResourceThresholds{
				{
					"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
				},
				{
					"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
				},
			},
			classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
				classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
					func(usage, limit api.Percentage) int {
						return int(usage - limit)
					},
				),
				classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
					func(usage, limit api.Percentage) int {
						return int(limit - usage)
					},
				),
			},
		},
		{
			name: "three thresholds",
			usage: map[string]v1.ResourceList{
				"node1": {
					// match for the first classifier.
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("10"),
				},
				"node2": {
					// match for the third classifier.
					v1.ResourceCPU:    resource.MustParse("90"),
					v1.ResourceMemory: resource.MustParse("90"),
				},
				"node3": {
					// match for the second classifier.
					v1.ResourceCPU:    resource.MustParse("40"),
					v1.ResourceMemory: resource.MustParse("40"),
				},
				"node4": {
					// matches no classifier.
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("90"),
				},
				"node5": {
					// match for the first classifier.
					v1.ResourceCPU:    resource.MustParse("11"),
					v1.ResourceMemory: resource.MustParse("18"),
				},
			},
			totals: normalizer.Replicate(
				[]string{"node1", "node2", "node3", "node4", "node5"},
				v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100"),
					v1.ResourceMemory: resource.MustParse("100"),
				},
			),
			thresholds: normalizer.Replicate(
				[]string{"node1", "node2", "node3", "node4", "node5"},
				[]api.ResourceThresholds{
					{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
					{v1.ResourceCPU: 50, v1.ResourceMemory: 50},
					{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
				},
			),
			expected: []map[string]api.ResourceThresholds{
				{
					"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
					"node5": {v1.ResourceCPU: 11, v1.ResourceMemory: 18},
				},
				{
					"node3": {v1.ResourceCPU: 40, v1.ResourceMemory: 40},
				},
				{
					"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
				},
			},
			classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
				classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
					func(usage, limit api.Percentage) int {
						return int(usage - limit)
					},
				),
				classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
					func(usage, limit api.Percentage) int {
						return int(usage - limit)
					},
				),
				classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
					func(usage, limit api.Percentage) int {
						return int(limit - usage)
					},
				),
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Normalize first, then classify the percentage map.
			pct := normalizer.Normalize(tt.usage, tt.totals, ResourceListUsageNormalizer)
			res := classifier.Classify(pct, tt.thresholds, tt.classifiers...)
			if !reflect.DeepEqual(res, tt.expected) {
				t.Fatalf("unexpected result: %v, expecting: %v", res, tt.expected)
			}
		})
	}
}
|
||||
|
||||
142
pkg/framework/plugins/nodeutilization/normalizer/normalizer.go
Normal file
142
pkg/framework/plugins/nodeutilization/normalizer/normalizer.go
Normal file
@@ -0,0 +1,142 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package normalizer
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"golang.org/x/exp/constraints"
|
||||
)
|
||||
|
||||
// Normalizer is a function that receives two values of the same type and
// returns an object of a different type. A use case is a function that
// converts a memory usage from mb to % (the first argument would be the
// memory usage in mb and the second argument would be the total memory
// available in mb).
type Normalizer[V, N any] func(V, V) N
|
||||
|
||||
// Values is a map of values indexed by a comparable key. An example of this
// can be a list of resources indexed by a node name.
type Values[K comparable, V any] map[K]V
|
||||
|
||||
// Number is a constraint satisfied by any integer or floating point type.
// It represents the things we can do math operations on.
type Number interface {
	constraints.Integer | constraints.Float
}
|
||||
|
||||
// Normalize uses a Normalizer function to normalize a set of values. For
|
||||
// example one may want to convert a set of memory usages from mb to %.
|
||||
// This function receives a set of usages, a set of totals, and a Normalizer
|
||||
// function. The function will return a map with the normalized values.
|
||||
func Normalize[K comparable, V, N any](usages, totals Values[K, V], fn Normalizer[V, N]) map[K]N {
|
||||
result := Values[K, N]{}
|
||||
for key, value := range usages {
|
||||
total, ok := totals[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
result[key] = fn(value, total)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Replicate replicates the provided value for each key in the provided slice.
// Returns a map with the keys and the provided value. Note all keys share the
// same value; for reference types (maps, slices, pointers) every entry aliases
// the same underlying data.
func Replicate[K comparable, V any](keys []K, value V) map[K]V {
	// Pre-size the map: the number of entries is known up front.
	result := make(map[K]V, len(keys))
	for _, key := range keys {
		result[key] = value
	}
	return result
}
|
||||
|
||||
// Clamp imposes minimum and maximum limits on a set of values. The function
|
||||
// will return a set of values where each value is between the minimum and
|
||||
// maximum values (included). Values below minimum are rounded up to the
|
||||
// minimum value, and values above maximum are rounded down to the maximum
|
||||
// value.
|
||||
func Clamp[K comparable, N Number, V ~map[K]N](values V, minimum, maximum N) V {
|
||||
result := V{}
|
||||
for key := range values {
|
||||
value := values[key]
|
||||
value = N(math.Max(float64(value), float64(minimum)))
|
||||
value = N(math.Min(float64(value), float64(maximum)))
|
||||
result[key] = value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Map applies a function to each element of a map of values. Returns a new
|
||||
// slice with the results of applying the function to each element.
|
||||
func Map[K comparable, N Number, V ~map[K]N](items []V, fn func(V) V) []V {
|
||||
result := []V{}
|
||||
for _, item := range items {
|
||||
result = append(result, fn(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Negate converts the values of a map to their negated values.
|
||||
func Negate[K comparable, N Number, V ~map[K]N](values V) V {
|
||||
result := V{}
|
||||
for key, value := range values {
|
||||
result[key] = -value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Round rounds the values of a map to the nearest integer. Calls math.Round on
|
||||
// each value of the map.
|
||||
func Round[K comparable, N Number, V ~map[K]N](values V) V {
|
||||
result := V{}
|
||||
for key, value := range values {
|
||||
result[key] = N(math.Round(float64(value)))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Sum sums up the values of two maps, key by key. Values are expected to be
// of Number type. Original maps are preserved. The result carries exactly
// the keys of mapA: a key present only in mapA is kept as-is (mapB
// contributes its zero value), while a key present only in mapB is ignored.
func Sum[K comparable, N Number, V ~map[K]N](mapA, mapB V) V {
	result := V{}
	for name, value := range mapA {
		result[name] = value + mapB[name]
	}
	return result
}
|
||||
|
||||
// Average calculates the average of a set of values. This function receives
|
||||
// a map of values and returns the average of all the values. Average expects
|
||||
// the values to represent the same unit of measure. You can use this function
|
||||
// after Normalizing the values.
|
||||
func Average[J, K comparable, N Number, V ~map[J]N](values map[K]V) V {
|
||||
counter := map[J]int{}
|
||||
result := V{}
|
||||
for _, imap := range values {
|
||||
for name, value := range imap {
|
||||
result[name] += value
|
||||
counter[name]++
|
||||
}
|
||||
}
|
||||
|
||||
for name := range result {
|
||||
result[name] /= N(counter[name])
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -0,0 +1,649 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package normalizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// ResourceListUsageNormalizer converts each resource usage into a percentage
// of the corresponding total, clamped to [0, 100]. Resources without a
// matching entry in totals are skipped.
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
	result := api.ResourceThresholds{}
	for rname, value := range usages {
		// Skip resources with no total: there is no denominator to
		// compute a percentage against.
		total, ok := totals[rname]
		if !ok {
			continue
		}

		// CPU is compared in milli-units for finer granularity; all
		// other resources use their plain integer value.
		used, avail := value.Value(), total.Value()
		if rname == v1.ResourceCPU {
			used, avail = value.MilliValue(), total.MilliValue()
		}

		// Clamp to [0, 100]. NOTE(review): a zero total yields NaN
		// (0/0) or survives as NaN through Max/Min — presumably
		// callers always pass non-zero totals; confirm.
		pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
		result[rname] = api.Percentage(pct)
	}
	return result
}
|
||||
|
||||
// TestNormalizeSimple exercises Normalize with plain float64 maps and a
// caller-supplied normalizer function.
func TestNormalizeSimple(t *testing.T) {
	for _, tt := range []struct {
		name       string
		usages     map[string]float64
		totals     map[string]float64
		expected   map[string]float64
		normalizer Normalizer[float64, float64]
	}{
		{
			name:     "single normalization",
			usages:   map[string]float64{"cpu": 1},
			totals:   map[string]float64{"cpu": 2},
			expected: map[string]float64{"cpu": 0.5},
			normalizer: func(usage, total float64) float64 {
				return usage / total
			},
		},
		{
			name: "multiple normalizations",
			usages: map[string]float64{
				"cpu": 1,
				"mem": 6,
			},
			totals: map[string]float64{
				"cpu": 2,
				"mem": 10,
			},
			expected: map[string]float64{
				"cpu": 0.5,
				"mem": 0.6,
			},
			normalizer: func(usage, total float64) float64 {
				return usage / total
			},
		},
		{
			// Keys without a matching total are dropped from the result.
			name: "missing totals for a key",
			usages: map[string]float64{
				"cpu": 1,
				"mem": 6,
			},
			totals: map[string]float64{
				"cpu": 2,
			},
			expected: map[string]float64{
				"cpu": 0.5,
			},
			normalizer: func(usage, total float64) float64 {
				return usage / total
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			result := Normalize(tt.usages, tt.totals, tt.normalizer)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected result: %v", result)
			}
		})
	}
}
|
||||
|
||||
// TestNormalize exercises Normalize with kubernetes ResourceLists and the
// package-level ResourceListUsageNormalizer, including the clamping of
// over-100% usages.
func TestNormalize(t *testing.T) {
	for _, tt := range []struct {
		name       string
		usages     map[string]v1.ResourceList
		totals     map[string]v1.ResourceList
		expected   map[string]api.ResourceThresholds
		normalizer Normalizer[v1.ResourceList, api.ResourceThresholds]
	}{
		{
			name: "single normalization",
			usages: map[string]v1.ResourceList{
				"node1": {v1.ResourceCPU: resource.MustParse("1")},
			},
			totals: map[string]v1.ResourceList{
				"node1": {v1.ResourceCPU: resource.MustParse("2")},
			},
			expected: map[string]api.ResourceThresholds{
				"node1": {v1.ResourceCPU: 50},
			},
			normalizer: ResourceListUsageNormalizer,
		},
		{
			name: "multiple normalization",
			usages: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("1"),
					v1.ResourceMemory: resource.MustParse("6"),
					v1.ResourcePods:   resource.MustParse("2"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("20"),
					v1.ResourcePods:   resource.MustParse("30"),
				},
			},
			totals: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("6"),
					v1.ResourcePods:   resource.MustParse("100"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("100"),
					v1.ResourceMemory: resource.MustParse("100"),
					v1.ResourcePods:   resource.MustParse("100"),
				},
			},
			expected: map[string]api.ResourceThresholds{
				"node1": {
					v1.ResourceCPU:    50,
					v1.ResourceMemory: 100,
					v1.ResourcePods:   2,
				},
				"node2": {
					v1.ResourceCPU:    10,
					v1.ResourceMemory: 20,
					v1.ResourcePods:   30,
				},
			},
			normalizer: ResourceListUsageNormalizer,
		},
		{
			// Usages above the total must be clamped at 100%.
			name: "multiple normalization with over 100% usage",
			usages: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("120"),
					v1.ResourceMemory: resource.MustParse("130"),
					v1.ResourcePods:   resource.MustParse("140"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("150"),
					v1.ResourceMemory: resource.MustParse("160"),
					v1.ResourcePods:   resource.MustParse("170"),
				},
			},
			totals: Replicate(
				[]string{"node1", "node2"},
				v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100"),
					v1.ResourceMemory: resource.MustParse("100"),
					v1.ResourcePods:   resource.MustParse("100"),
				},
			),
			expected: Replicate(
				[]string{"node1", "node2"},
				api.ResourceThresholds{
					v1.ResourceCPU:    100,
					v1.ResourceMemory: 100,
					v1.ResourcePods:   100,
				},
			),
			normalizer: ResourceListUsageNormalizer,
		},
		{
			name: "multiple normalization with over 100% usage and different totals",
			usages: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("99"),
					v1.ResourceMemory: resource.MustParse("99Gi"),
				},
				"node3": {
					v1.ResourceCPU:    resource.MustParse("8"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			totals: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("4"),
					v1.ResourceMemory: resource.MustParse("4Gi"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("100"),
					v1.ResourceMemory: resource.MustParse("100Gi"),
				},
				"node3": {
					v1.ResourceCPU:    resource.MustParse("4"),
					v1.ResourceMemory: resource.MustParse("4Gi"),
				},
			},
			expected: map[string]api.ResourceThresholds{
				"node1": {
					v1.ResourceCPU:    50,
					v1.ResourceMemory: 50,
				},
				"node2": {
					v1.ResourceCPU:    99,
					v1.ResourceMemory: 99,
				},
				// node3 uses twice its total and is clamped at 100%.
				"node3": {
					v1.ResourceCPU:    100,
					v1.ResourceMemory: 100,
				},
			},
			normalizer: ResourceListUsageNormalizer,
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			result := Normalize(tt.usages, tt.totals, tt.normalizer)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected result: %v", result)
			}
		})
	}
}
|
||||
|
||||
// TestAverage checks Average over normalized usages, including keys that are
// present only on a subset of the nodes (each key is averaged only over the
// nodes where the normalized value exists).
func TestAverage(t *testing.T) {
	for _, tt := range []struct {
		name     string
		usage    map[string]v1.ResourceList
		limits   map[string]v1.ResourceList
		expected api.ResourceThresholds
	}{
		{
			name:     "empty usage",
			usage:    map[string]v1.ResourceList{},
			limits:   map[string]v1.ResourceList{},
			expected: api.ResourceThresholds{},
		},
		{
			name: "fifty percent usage",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("1"),
					v1.ResourceMemory: resource.MustParse("6"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("6"),
				},
			},
			limits: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("12"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("4"),
					v1.ResourceMemory: resource.MustParse("12"),
				},
			},
			expected: api.ResourceThresholds{
				v1.ResourceCPU:    50,
				v1.ResourceMemory: 50,
			},
		},
		{
			name: "mixed percent usage",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("80"),
					v1.ResourcePods:   resource.MustParse("20"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("20"),
					v1.ResourceMemory: resource.MustParse("60"),
					v1.ResourcePods:   resource.MustParse("20"),
				},
			},
			limits: Replicate(
				[]string{"node1", "node2"},
				v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100"),
					v1.ResourceMemory: resource.MustParse("100"),
					v1.ResourcePods:   resource.MustParse("10000"),
				},
			),
			expected: api.ResourceThresholds{
				v1.ResourceCPU:    15,
				v1.ResourceMemory: 70,
				v1.ResourcePods:   0.2,
			},
		},
		{
			// Per-node limits differ, so the per-node percentages
			// being averaged differ as well.
			name: "mixed limits",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("30"),
					v1.ResourcePods:   resource.MustParse("200"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("72"),
					v1.ResourcePods:   resource.MustParse("200"),
				},
			},
			limits: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("10"),
					v1.ResourceMemory: resource.MustParse("100"),
					v1.ResourcePods:   resource.MustParse("1000"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("1000"),
					v1.ResourceMemory: resource.MustParse("180"),
					v1.ResourcePods:   resource.MustParse("10"),
				},
			},
			expected: api.ResourceThresholds{
				v1.ResourceCPU:    50.5,
				v1.ResourceMemory: 35,
				v1.ResourcePods:   60,
			},
		},
		{
			// Resources present on only some nodes are averaged over
			// the nodes where both usage and limit exist.
			name: "some nodes missing some resources",
			usage: map[string]v1.ResourceList{
				"node1": {
					"limit-exists-in-all":  resource.MustParse("10"),
					"limit-exists-in-two":  resource.MustParse("11"),
					"limit-does-not-exist": resource.MustParse("12"),
					"usage-exists-in-all":  resource.MustParse("13"),
					"usage-exists-in-two":  resource.MustParse("20"),
				},
				"node2": {
					"limit-exists-in-all":  resource.MustParse("10"),
					"limit-exists-in-two":  resource.MustParse("11"),
					"limit-does-not-exist": resource.MustParse("12"),
					"usage-exists-in-all":  resource.MustParse("13"),
					"usage-exists-in-two":  resource.MustParse("20"),
				},
				"node3": {
					"limit-exists-in-all":  resource.MustParse("10"),
					"limit-exists-in-two":  resource.MustParse("11"),
					"limit-does-not-exist": resource.MustParse("12"),
					"usage-exists-in-all":  resource.MustParse("13"),
				},
				"node4": {
					"limit-exists-in-all":  resource.MustParse("10"),
					"limit-exists-in-two":  resource.MustParse("11"),
					"limit-does-not-exist": resource.MustParse("12"),
					"usage-exists-in-all":  resource.MustParse("13"),
				},
				"node5": {
					"random-usage-without-limit": resource.MustParse("10"),
				},
			},
			limits: map[string]v1.ResourceList{
				"node1": {
					"limit-exists-in-all":  resource.MustParse("100"),
					"limit-exists-in-two":  resource.MustParse("100"),
					"usage-exists-in-all":  resource.MustParse("100"),
					"usage-exists-in-two":  resource.MustParse("100"),
					"usage-does-not-exist": resource.MustParse("100"),
				},
				"node2": {
					"limit-exists-in-all":  resource.MustParse("100"),
					"limit-exists-in-two":  resource.MustParse("100"),
					"usage-exists-in-all":  resource.MustParse("100"),
					"usage-exists-in-two":  resource.MustParse("100"),
					"usage-does-not-exist": resource.MustParse("100"),
				},
				"node3": {
					"limit-exists-in-all":  resource.MustParse("100"),
					"usage-exists-in-all":  resource.MustParse("100"),
					"usage-exists-in-two":  resource.MustParse("100"),
					"usage-does-not-exist": resource.MustParse("100"),
				},
				"node4": {
					"limit-exists-in-all":  resource.MustParse("100"),
					"usage-exists-in-all":  resource.MustParse("100"),
					"usage-exists-in-two":  resource.MustParse("100"),
					"usage-does-not-exist": resource.MustParse("100"),
				},
				"node5": {
					"random-limit-without-usage": resource.MustParse("100"),
				},
			},
			expected: api.ResourceThresholds{
				"limit-exists-in-all": 10,
				"limit-exists-in-two": 11,
				"usage-exists-in-all": 13,
				"usage-exists-in-two": 20,
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			average := Average(
				Normalize(
					tt.usage, tt.limits, ResourceListUsageNormalizer,
				),
			)
			if !reflect.DeepEqual(average, tt.expected) {
				t.Fatalf("unexpected result: %v, expected: %v", average, tt.expected)
			}
		})
	}
}
|
||||
|
||||
func TestSum(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
data api.ResourceThresholds
|
||||
deviations []api.ResourceThresholds
|
||||
expected []api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "single deviation",
|
||||
data: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
deviations: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 1,
|
||||
v1.ResourceMemory: 1,
|
||||
v1.ResourcePods: 1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 2,
|
||||
v1.ResourceMemory: 2,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 3,
|
||||
v1.ResourceMemory: 3,
|
||||
v1.ResourcePods: 3,
|
||||
},
|
||||
},
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 51,
|
||||
v1.ResourceMemory: 51,
|
||||
v1.ResourcePods: 51,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 52,
|
||||
v1.ResourceMemory: 52,
|
||||
v1.ResourcePods: 52,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 53,
|
||||
v1.ResourceMemory: 53,
|
||||
v1.ResourcePods: 53,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deviate with negative values",
|
||||
data: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
deviations: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: -2,
|
||||
v1.ResourceMemory: -2,
|
||||
v1.ResourcePods: -2,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: -1,
|
||||
v1.ResourceMemory: -1,
|
||||
v1.ResourcePods: -1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 0,
|
||||
v1.ResourceMemory: 0,
|
||||
v1.ResourcePods: 0,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 1,
|
||||
v1.ResourceMemory: 1,
|
||||
v1.ResourcePods: 1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 2,
|
||||
v1.ResourceMemory: 2,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
},
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 48,
|
||||
v1.ResourceMemory: 48,
|
||||
v1.ResourcePods: 48,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 49,
|
||||
v1.ResourceMemory: 49,
|
||||
v1.ResourcePods: 49,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 51,
|
||||
v1.ResourceMemory: 51,
|
||||
v1.ResourcePods: 51,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 52,
|
||||
v1.ResourceMemory: 52,
|
||||
v1.ResourcePods: 52,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := []api.ResourceThresholds{}
|
||||
for _, deviation := range tt.deviations {
|
||||
partial := Sum(tt.data, deviation)
|
||||
result = append(result, partial)
|
||||
}
|
||||
|
||||
if len(result) != len(tt.deviations) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
fmt.Printf("%T, %T\n", result, tt.expected)
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClamp(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
data []api.ResourceThresholds
|
||||
minimum api.Percentage
|
||||
maximum api.Percentage
|
||||
expected []api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "all over the limit",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some over some below the limits",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 7,
|
||||
v1.ResourceMemory: 8,
|
||||
v1.ResourcePods: 88,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 10,
|
||||
v1.ResourceMemory: 10,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all within the limits",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 15,
|
||||
v1.ResourcePods: 15,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 15,
|
||||
v1.ResourcePods: 15,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fn := func(thresholds api.ResourceThresholds) api.ResourceThresholds {
|
||||
return Clamp(thresholds, tt.minimum, tt.maximum)
|
||||
}
|
||||
result := Map(tt.data, fn)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,18 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// EvictionMode describe a mode of eviction. See the list below for the
|
||||
// available modes.
|
||||
type EvictionMode string
|
||||
|
||||
const (
|
||||
// EvictionModeOnlyThresholdingResources makes the descheduler evict
|
||||
// only pods that have a resource request defined for any of the user
|
||||
// provided thresholds. If the pod does not request the resource, it
|
||||
// will not be evicted.
|
||||
EvictionModeOnlyThresholdingResources EvictionMode = "OnlyThresholdingResources"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
@@ -28,12 +40,15 @@ type LowNodeUtilizationArgs struct {
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
MetricsUtilization *MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
// but then filtered out before eviction
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
|
||||
// evictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
|
||||
EvictionLimits *api.EvictionLimits `json:"evictionLimits,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
@@ -42,9 +57,15 @@ type LowNodeUtilizationArgs struct {
|
||||
type HighNodeUtilizationArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
|
||||
// EvictionModes is a set of modes to be taken into account when the
|
||||
// descheduler evicts pods. For example the mode
|
||||
// `OnlyThresholdingResources` can be used to make sure the descheduler
|
||||
// only evicts pods who have resource requests for the defined
|
||||
// thresholds.
|
||||
EvictionModes []EvictionMode `json:"evictionModes,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
@@ -53,8 +74,24 @@ type HighNodeUtilizationArgs struct {
|
||||
}
|
||||
|
||||
// MetricsUtilization allow to consume actual resource utilization from metrics
|
||||
// +k8s:deepcopy-gen=true
|
||||
type MetricsUtilization struct {
|
||||
// metricsServer enables metrics from a kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
// Deprecated. Use Source instead.
|
||||
MetricsServer bool `json:"metricsServer,omitempty"`
|
||||
|
||||
// source enables the plugin to consume metrics from a metrics source.
|
||||
// Currently only KubernetesMetrics available.
|
||||
Source api.MetricsSource `json:"source,omitempty"`
|
||||
|
||||
// prometheus enables metrics collection through a prometheus query.
|
||||
Prometheus *Prometheus `json:"prometheus,omitempty"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
// query returning a vector of samples, each sample labeled with `instance`
|
||||
// corresponding to a node name with each sample value as a real number
|
||||
// in <0; 1> interval.
|
||||
Query string `json:"query,omitempty"`
|
||||
}
|
||||
|
||||
@@ -19,27 +19,54 @@ package nodeutilization
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
type UsageClientType int
|
||||
|
||||
const (
|
||||
requestedUsageClientType UsageClientType = iota
|
||||
actualUsageClientType
|
||||
prometheusUsageClientType
|
||||
)
|
||||
|
||||
type notSupportedError struct {
|
||||
usageClientType UsageClientType
|
||||
}
|
||||
|
||||
func (e notSupportedError) Error() string {
|
||||
return "maximum number of evicted pods per node reached"
|
||||
}
|
||||
|
||||
func newNotSupportedError(usageClientType UsageClientType) *notSupportedError {
|
||||
return ¬SupportedError{
|
||||
usageClientType: usageClientType,
|
||||
}
|
||||
}
|
||||
|
||||
type usageClient interface {
|
||||
// Both low/high node utilization plugins are expected to invoke sync right
|
||||
// after Balance method is invoked. There's no cache invalidation so each
|
||||
// Balance is expected to get the latest data by invoking sync.
|
||||
sync(nodes []*v1.Node) error
|
||||
nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
|
||||
sync(ctx context.Context, nodes []*v1.Node) error
|
||||
nodeUtilization(node string) api.ReferencedResourceList
|
||||
pods(node string) []*v1.Pod
|
||||
podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
|
||||
podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
|
||||
}
|
||||
|
||||
type requestedUsageClient struct {
|
||||
@@ -47,7 +74,7 @@ type requestedUsageClient struct {
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
_nodeUtilization map[string]api.ReferencedResourceList
|
||||
}
|
||||
|
||||
var _ usageClient = &requestedUsageClient{}
|
||||
@@ -62,7 +89,7 @@ func newRequestedUsageClient(
|
||||
}
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
|
||||
return s._nodeUtilization[node]
|
||||
}
|
||||
|
||||
@@ -70,16 +97,16 @@ func (s *requestedUsageClient) pods(node string) []*v1.Pod {
|
||||
return s._pods[node]
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
usage := make(map[v1.ResourceName]*resource.Quantity)
|
||||
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
|
||||
usage := make(api.ReferencedResourceList)
|
||||
for _, resourceName := range s.resourceNames {
|
||||
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
|
||||
s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
func (s *requestedUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
s._nodeUtilization = make(map[string]api.ReferencedResourceList)
|
||||
s._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
for _, node := range nodes {
|
||||
@@ -111,7 +138,7 @@ type actualUsageClient struct {
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
_nodeUtilization map[string]api.ReferencedResourceList
|
||||
}
|
||||
|
||||
var _ usageClient = &actualUsageClient{}
|
||||
@@ -128,7 +155,7 @@ func newActualUsageClient(
|
||||
}
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
@@ -136,7 +163,7 @@ func (client *actualUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
|
||||
// It's not efficient to keep track of all pods in a cluster when only their fractions is evicted.
|
||||
// Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
|
||||
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
@@ -144,7 +171,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
|
||||
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
totalUsage := make(map[v1.ResourceName]*resource.Quantity)
|
||||
totalUsage := make(api.ReferencedResourceList)
|
||||
for _, container := range podMetrics.Containers {
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if resourceName == v1.ResourcePods {
|
||||
@@ -164,8 +191,8 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
|
||||
return totalUsage, nil
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
func (client *actualUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]api.ReferencedResourceList)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodesUsage, err := client.metricsCollector.AllNodesUsage()
|
||||
@@ -180,18 +207,19 @@ func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
nodeUsage, ok := nodesUsage[node.Name]
|
||||
collectedNodeUsage, ok := nodesUsage[node.Name]
|
||||
if !ok {
|
||||
return fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
nodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
collectedNodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
|
||||
nodeUsage := api.ReferencedResourceList{}
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if _, exists := nodeUsage[resourceName]; !exists {
|
||||
if _, exists := collectedNodeUsage[resourceName]; !exists {
|
||||
return fmt.Errorf("unable to find %q resource for collected %q node metric", resourceName, node.Name)
|
||||
}
|
||||
nodeUsage[resourceName] = collectedNodeUsage[resourceName]
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsage
|
||||
@@ -199,3 +227,96 @@ func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type prometheusUsageClient struct {
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
promClient promapi.Client
|
||||
promQuery string
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
var _ usageClient = &actualUsageClient{}
|
||||
|
||||
func newPrometheusUsageClient(
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
promClient promapi.Client,
|
||||
promQuery string,
|
||||
) *prometheusUsageClient {
|
||||
return &prometheusUsageClient{
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
promClient: promClient,
|
||||
promQuery: promQuery,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
return nil, newNotSupportedError(prometheusUsageClientType)
|
||||
}
|
||||
|
||||
func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
|
||||
}
|
||||
if len(warnings) > 0 {
|
||||
logger.Info("prometheus metrics warnings: %v", warnings)
|
||||
}
|
||||
|
||||
if results.Type() != model.ValVector {
|
||||
return nil, fmt.Errorf("expected query results to be of type %q, got %q instead", model.ValVector, results.Type())
|
||||
}
|
||||
|
||||
nodeUsages := make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
for _, sample := range results.(model.Vector) {
|
||||
nodeName, exists := sample.Metric["instance"]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("The collected metrics sample is missing 'instance' key")
|
||||
}
|
||||
if sample.Value < 0 || sample.Value > 1 {
|
||||
return nil, fmt.Errorf("The collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
|
||||
}
|
||||
nodeUsages[string(nodeName)] = map[v1.ResourceName]*resource.Quantity{
|
||||
MetricResource: resource.NewQuantity(int64(sample.Value*100), resource.DecimalSI),
|
||||
}
|
||||
}
|
||||
|
||||
return nodeUsages, nil
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodeUsages, err := NodeUsageFromPrometheusMetrics(ctx, client.promClient, client.promQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
if _, exists := nodeUsages[node.Name]; !exists {
|
||||
return fmt.Errorf("unable to find metric entry for %v", node.Name)
|
||||
}
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsages[node.Name]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,9 +18,14 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -58,9 +63,9 @@ func updateMetricsAndCheckNodeUtilization(
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture metrics: %v", err)
|
||||
}
|
||||
err = usageClient.sync(nodes)
|
||||
err = usageClient.sync(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture a snapshot: %v", err)
|
||||
t.Fatalf("failed to sync a snapshot: %v", err)
|
||||
}
|
||||
nodeUtilization := usageClient.nodeUtilization(nodeName)
|
||||
t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
|
||||
@@ -137,3 +142,158 @@ func TestActualUsageClient(t *testing.T) {
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
}
|
||||
|
||||
type fakePromClient struct {
|
||||
result interface{}
|
||||
dataType model.ValueType
|
||||
}
|
||||
|
||||
type fakePayload struct {
|
||||
Status string `json:"status"`
|
||||
Data queryResult `json:"data"`
|
||||
}
|
||||
|
||||
type queryResult struct {
|
||||
Type model.ValueType `json:"resultType"`
|
||||
Result interface{} `json:"result"`
|
||||
}
|
||||
|
||||
func (client *fakePromClient) URL(ep string, args map[string]string) *url.URL {
|
||||
return &url.URL{}
|
||||
}
|
||||
|
||||
func (client *fakePromClient) Do(ctx context.Context, request *http.Request) (*http.Response, []byte, error) {
|
||||
jsonData, err := json.Marshal(fakePayload{
|
||||
Status: "success",
|
||||
Data: queryResult{
|
||||
Type: client.dataType,
|
||||
Result: client.result,
|
||||
},
|
||||
})
|
||||
|
||||
return &http.Response{StatusCode: 200}, jsonData, err
|
||||
}
|
||||
|
||||
func sample(metricName, nodeName string, value float64) *model.Sample {
|
||||
return &model.Sample{
|
||||
Metric: model.Metric{
|
||||
"__name__": model.LabelValue(metricName),
|
||||
"instance": model.LabelValue(nodeName),
|
||||
},
|
||||
Value: model.SampleValue(value),
|
||||
Timestamp: 1728991761711,
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusUsageClient(t *testing.T) {
|
||||
n1 := test.BuildTestNode("ip-10-0-17-165.ec2.internal", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("ip-10-0-51-101.ec2.internal", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("ip-10-0-94-25.ec2.internal", 2000, 3000, 10, nil)
|
||||
|
||||
nodes := []*v1.Node{n1, n2, n3}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
|
||||
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
result interface{}
|
||||
dataType model.ValueType
|
||||
nodeUsage map[string]int64
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "valid data",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 0.20381818181818104),
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-17-165.ec2.internal", 0.4245454545454522),
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-94-25.ec2.internal", 0.5695757575757561),
|
||||
},
|
||||
nodeUsage: map[string]int64{
|
||||
"ip-10-0-51-101.ec2.internal": 20,
|
||||
"ip-10-0-17-165.ec2.internal": 42,
|
||||
"ip-10-0-94-25.ec2.internal": 56,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid data missing instance label",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
"__name__": model.LabelValue("instance:node_cpu:rate:sum"),
|
||||
},
|
||||
Value: model.SampleValue(0.20381818181818104),
|
||||
Timestamp: 1728991761711,
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("The collected metrics sample is missing 'instance' key"),
|
||||
},
|
||||
{
|
||||
name: "invalid data value out of range",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 1.20381818181818104),
|
||||
},
|
||||
err: fmt.Errorf("The collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
|
||||
},
|
||||
{
|
||||
name: "invalid data not a vector",
|
||||
dataType: model.ValScalar,
|
||||
result: model.Scalar{
|
||||
Value: model.SampleValue(0.20381818181818104),
|
||||
Timestamp: 1728991761711,
|
||||
},
|
||||
err: fmt.Errorf("expected query results to be of type \"vector\", got \"scalar\" instead"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
pClient := &fakePromClient{
|
||||
result: tc.result,
|
||||
dataType: tc.dataType,
|
||||
}
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Fatalf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
prometheusUsageClient := newPrometheusUsageClient(podsAssignedToNode, pClient, "instance:node_cpu:rate:sum")
|
||||
err = prometheusUsageClient.sync(ctx, nodes)
|
||||
if tc.err == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err == nil {
|
||||
t.Fatalf("unexpected %q error, got nil instead", tc.err)
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Fatalf("expected %q error, got %q instead", tc.err, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeUtil := prometheusUsageClient.nodeUtilization(node.Name)
|
||||
if nodeUtil[MetricResource].Value() != tc.nodeUsage[node.Name] {
|
||||
t.Fatalf("expected %q node utilization to be %v, got %v instead", node.Name, tc.nodeUsage[node.Name], nodeUtil[MetricResource])
|
||||
} else {
|
||||
t.Logf("%v node utilization: %v", node.Name, nodeUtil[MetricResource])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,7 +30,25 @@ func ValidateHighNodeUtilizationArgs(obj runtime.Object) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// make sure we know about the eviction modes defined by the user.
|
||||
return validateEvictionModes(args.EvictionModes)
|
||||
}
|
||||
|
||||
// validateEvictionModes checks if the eviction modes are valid/known
|
||||
// to the descheduler.
|
||||
func validateEvictionModes(modes []EvictionMode) error {
|
||||
// we are using this approach to make the code more extensible
|
||||
// in the future.
|
||||
validModes := map[EvictionMode]bool{
|
||||
EvictionModeOnlyThresholdingResources: true,
|
||||
}
|
||||
|
||||
for _, mode := range modes {
|
||||
if validModes[mode] {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("invalid eviction mode %s", mode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -44,6 +62,17 @@ func ValidateLowNodeUtilizationArgs(obj runtime.Object) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if args.MetricsUtilization != nil {
|
||||
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.MetricsServer {
|
||||
return fmt.Errorf("it is not allowed to set both %q source and metricsServer", api.KubernetesMetrics)
|
||||
}
|
||||
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.Prometheus != nil {
|
||||
return fmt.Errorf("prometheus configuration is not allowed to set when source is set to %q", api.KubernetesMetrics)
|
||||
}
|
||||
if args.MetricsUtilization.Source == api.PrometheusMetrics && (args.MetricsUtilization.Prometheus == nil || args.MetricsUtilization.Prometheus.Query == "") {
|
||||
return fmt.Errorf("prometheus query is required when metrics source is set to %q", api.PrometheusMetrics)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,164 +21,374 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
|
||||
extendedResource := v1.ResourceName("example.com/foo")
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
name string
|
||||
args *LowNodeUtilizationArgs
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' CPU config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "only thresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "only targetThresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different extended resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' extended resource config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "setting both kubernetes metrics source and metricsserver",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
MetricsServer: true,
|
||||
Source: api.KubernetesMetrics,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("it is not allowed to set both \"KubernetesMetrics\" source and metricsServer"),
|
||||
},
|
||||
{
|
||||
name: "missing prometheus query",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("prometheus query is required when metrics source is set to \"Prometheus\""),
|
||||
},
|
||||
{
|
||||
name: "prometheus set when source set to kubernetes metrics",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.KubernetesMetrics,
|
||||
Prometheus: &Prometheus{},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("prometheus configuration is not allowed to set when source is set to \"KubernetesMetrics\""),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
args := &LowNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
TargetThresholds: testCase.targetThresholds,
|
||||
}
|
||||
validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
validateErr := ValidateLowNodeUtilizationArgs(runtime.Object(testCase.args))
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHighNodeUtilizationPluginConfig(t *testing.T) {
|
||||
extendedResource := v1.ResourceName("example.com/foo")
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
args *HighNodeUtilizationArgs
|
||||
wantErr bool
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "valid configuration with CPU and memory",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 90,
|
||||
},
|
||||
EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "valid configuration with extended resource",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 85,
|
||||
extendedResource: 95,
|
||||
},
|
||||
EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty thresholds",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "no resource threshold is configured",
|
||||
},
|
||||
{
|
||||
name: "threshold below minimum (0%)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: -1,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "cpu threshold not in [0, 100] range",
|
||||
},
|
||||
{
|
||||
name: "threshold above maximum (100%)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceMemory: 101,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "memory threshold not in [0, 100] range",
|
||||
},
|
||||
{
|
||||
name: "multiple thresholds with one out of range",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 150,
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "memory threshold not in [0, 100] range",
|
||||
},
|
||||
{
|
||||
name: "evictableNamespaces with Exclude (allowed)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictableNamespaces: &api.Namespaces{
|
||||
Exclude: []string{"ns1", "ns2"},
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "invalid eviction mode",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictionModes: []EvictionMode{"InvalidMode"},
|
||||
},
|
||||
wantErr: true,
|
||||
errMsg: "invalid eviction mode InvalidMode",
|
||||
},
|
||||
{
|
||||
name: "missing eviction modes (nil) - should be allowed (treated as empty)",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictionModes: nil,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "empty eviction modes slice - should be allowed",
|
||||
args: &HighNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
},
|
||||
EvictionModes: []EvictionMode{},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := ValidateHighNodeUtilizationArgs(runtime.Object(tc.args))
|
||||
|
||||
if tc.wantErr {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error, but got nil")
|
||||
}
|
||||
if tc.errMsg != "" && err.Error() != tc.errMsg {
|
||||
t.Errorf("expected error message: %q, but got: %q", tc.errMsg, err.Error())
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,11 @@ func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
out.MetricsUtilization = in.MetricsUtilization
|
||||
if in.EvictionModes != nil {
|
||||
in, out := &in.EvictionModes, &out.EvictionModes
|
||||
*out = make([]EvictionMode, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.EvictableNamespaces != nil {
|
||||
in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
|
||||
*out = new(api.Namespaces)
|
||||
@@ -82,12 +86,21 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
out.MetricsUtilization = in.MetricsUtilization
|
||||
if in.MetricsUtilization != nil {
|
||||
in, out := &in.MetricsUtilization, &out.MetricsUtilization
|
||||
*out = new(MetricsUtilization)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.EvictableNamespaces != nil {
|
||||
in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
|
||||
*out = new(api.Namespaces)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.EvictionLimits != nil {
|
||||
in, out := &in.EvictionLimits, &out.EvictionLimits
|
||||
*out = new(api.EvictionLimits)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -108,3 +121,24 @@ func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MetricsUtilization) DeepCopyInto(out *MetricsUtilization) {
|
||||
*out = *in
|
||||
if in.Prometheus != nil {
|
||||
in, out := &in.Prometheus, &out.Prometheus
|
||||
*out = new(Prometheus)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsUtilization.
|
||||
func (in *MetricsUtilization) DeepCopy() *MetricsUtilization {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MetricsUtilization)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -38,17 +38,19 @@ var _ frameworktypes.DeschedulePlugin = &PodLifeTime{}
|
||||
|
||||
// PodLifeTime evicts pods on the node that violate the max pod lifetime threshold
|
||||
type PodLifeTime struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *PodLifeTimeArgs
|
||||
podFilter podutil.FilterFunc
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
podLifeTimeArgs, ok := args.(*PodLifeTimeArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type PodLifeTimeArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if podLifeTimeArgs.Namespaces != nil {
|
||||
@@ -115,6 +117,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
return &PodLifeTime{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: podLifeTimeArgs,
|
||||
@@ -130,9 +133,9 @@ func (d *PodLifeTime) Name() string {
|
||||
func (d *PodLifeTime) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
podsToEvict := make([]*v1.Pod, 0)
|
||||
nodeMap := make(map[string]*v1.Node, len(nodes))
|
||||
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
@@ -161,7 +164,7 @@ loop:
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -577,6 +577,32 @@ func TestPodLifeTime(t *testing.T) {
|
||||
pods[0].Status.Reason = "UnexpectedAdmissionError"
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status phase v1.PodSucceeded should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{string(v1.PodSucceeded)},
|
||||
},
|
||||
pods: []*v1.Pod{p16},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Phase = v1.PodSucceeded
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status phase v1.PodUnknown should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: &maxLifeTime,
|
||||
States: []string{string(v1.PodFailed)},
|
||||
},
|
||||
pods: []*v1.Pod{p16},
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 1,
|
||||
applyPodsFunc: func(pods []*v1.Pod) {
|
||||
pods[0].Status.Phase = v1.PodFailed
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "1 pod with pod status phase v1.PodUnknown should be evicted",
|
||||
args: &PodLifeTimeArgs{
|
||||
@@ -643,7 +669,7 @@ func TestPodLifeTime(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := New(tc.args, handle)
|
||||
plugin, err := New(ctx, tc.args, handle)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize the plugin: %v", err)
|
||||
}
|
||||
|
||||
@@ -18,35 +18,39 @@ package podlifetime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// ValidatePodLifeTimeArgs validates PodLifeTime arguments
|
||||
func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
args := obj.(*PodLifeTimeArgs)
|
||||
var allErrs []error
|
||||
if args.MaxPodLifeTimeSeconds == nil {
|
||||
return fmt.Errorf("MaxPodLifeTimeSeconds not set")
|
||||
allErrs = append(allErrs, fmt.Errorf("MaxPodLifeTimeSeconds not set"))
|
||||
}
|
||||
|
||||
// At most one of include/exclude can be set
|
||||
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
|
||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
|
||||
}
|
||||
|
||||
if args.LabelSelector != nil {
|
||||
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
|
||||
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
|
||||
}
|
||||
}
|
||||
podLifeTimeAllowedStates := sets.New(
|
||||
// Pod Status Phase
|
||||
string(v1.PodRunning),
|
||||
string(v1.PodPending),
|
||||
string(v1.PodSucceeded),
|
||||
string(v1.PodFailed),
|
||||
string(v1.PodUnknown),
|
||||
|
||||
// Pod Status Reasons
|
||||
@@ -70,8 +74,10 @@ func ValidatePodLifeTimeArgs(obj runtime.Object) error {
|
||||
)
|
||||
|
||||
if !podLifeTimeAllowedStates.HasAll(args.States...) {
|
||||
return fmt.Errorf("states must be one of %v", podLifeTimeAllowedStates.UnsortedList())
|
||||
allowed := podLifeTimeAllowedStates.UnsortedList()
|
||||
sort.Strings(allowed)
|
||||
allErrs = append(allErrs, fmt.Errorf("states must be one of %v", allowed))
|
||||
}
|
||||
|
||||
return nil
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package podlifetime
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -26,7 +27,7 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
args *PodLifeTimeArgs
|
||||
expectError bool
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
description: "valid arg, no errors",
|
||||
@@ -34,7 +35,13 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{string(v1.PodRunning)},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "Pod Status Reasons Succeeded or Failed",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{string(v1.PodSucceeded), string(v1.PodFailed)},
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod Status Reasons CrashLoopBackOff ",
|
||||
@@ -42,31 +49,41 @@ func TestValidateRemovePodLifeTimeArgs(t *testing.T) {
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{"CrashLoopBackOff"},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "nil MaxPodLifeTimeSeconds arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: nil,
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf("MaxPodLifeTimeSeconds not set"),
|
||||
},
|
||||
{
|
||||
description: "invalid pod state arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
States: []string{string(v1.NodeRunning)},
|
||||
MaxPodLifeTimeSeconds: func(i uint) *uint { return &i }(1),
|
||||
States: []string{string("InvalidState")},
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf("states must be one of [ContainerCreating CrashLoopBackOff CreateContainerConfigError CreateContainerError ErrImagePull Failed ImagePullBackOff InvalidImageName NodeAffinity NodeLost Pending PodInitializing Running Shutdown Succeeded UnexpectedAdmissionError Unknown]"),
|
||||
},
|
||||
{
|
||||
description: "nil MaxPodLifeTimeSeconds arg and invalid pod state arg, expects errors",
|
||||
args: &PodLifeTimeArgs{
|
||||
MaxPodLifeTimeSeconds: nil,
|
||||
States: []string{string("InvalidState")},
|
||||
},
|
||||
errInfo: fmt.Errorf("[MaxPodLifeTimeSeconds not set, states must be one of [ContainerCreating CrashLoopBackOff CreateContainerConfigError CreateContainerError ErrImagePull Failed ImagePullBackOff InvalidImageName NodeAffinity NodeLost Pending PodInitializing Running Shutdown Succeeded UnexpectedAdmissionError Unknown]]"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidatePodLifeTimeArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
validateErr := ValidatePodLifeTimeArgs(testCase.args)
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -45,6 +45,7 @@ const PluginName = "RemoveDuplicates"
|
||||
// As of now, this plugin won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||
|
||||
type RemoveDuplicates struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *RemoveDuplicatesArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -62,11 +63,12 @@ func (po podOwner) String() string {
|
||||
}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
removeDuplicatesArgs, ok := args.(*RemoveDuplicatesArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemoveDuplicatesArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if removeDuplicatesArgs.Namespaces != nil {
|
||||
@@ -85,6 +87,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
return &RemoveDuplicates{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: removeDuplicatesArgs,
|
||||
podFilter: podFilter,
|
||||
@@ -102,12 +105,13 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
ownerKeyOccurence := make(map[podOwner]int32)
|
||||
nodeCount := 0
|
||||
nodeMap := make(map[string]*v1.Node)
|
||||
logger := klog.FromContext(klog.NewContext(ctx, r.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, r.handle.GetPodsAssignedToNodeFunc(), r.podFilter)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
|
||||
logger.Error(err, "Error listing evictable pods on node", "node", klog.KObj(node))
|
||||
continue
|
||||
}
|
||||
nodeMap[node.Name] = node
|
||||
@@ -163,7 +167,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
for _, keys := range existing {
|
||||
if reflect.DeepEqual(keys, podContainerKeys) {
|
||||
matched = true
|
||||
klog.V(3).InfoS("Duplicate found", "pod", klog.KObj(pod))
|
||||
logger.V(3).Info("Duplicate found", "pod", klog.KObj(pod))
|
||||
for _, ownerRef := range ownerRefList {
|
||||
ownerKey := podOwner{
|
||||
namespace: pod.ObjectMeta.Namespace,
|
||||
@@ -195,16 +199,16 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
|
||||
targetNodes := getTargetNodes(podNodes, nodes)
|
||||
|
||||
klog.V(2).InfoS("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
|
||||
logger.V(2).Info("Adjusting feasible nodes", "owner", ownerKey, "from", nodeCount, "to", len(targetNodes))
|
||||
if len(targetNodes) < 2 {
|
||||
klog.V(1).InfoS("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
|
||||
logger.V(1).Info("Less than two feasible nodes for duplicates to land, skipping eviction", "owner", ownerKey)
|
||||
continue
|
||||
}
|
||||
|
||||
upperAvg := int(math.Ceil(float64(ownerKeyOccurence[ownerKey]) / float64(len(targetNodes))))
|
||||
loop:
|
||||
for nodeName, pods := range podNodes {
|
||||
klog.V(2).InfoS("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
|
||||
logger.V(2).Info("Average occurrence per node", "node", klog.KObj(nodeMap[nodeName]), "ownerKey", ownerKey, "avg", upperAvg)
|
||||
// list of duplicated pods does not contain the original referential pod
|
||||
if len(pods)+1 > upperAvg {
|
||||
// It's assumed all duplicated pods are in the same priority class
|
||||
@@ -220,7 +224,7 @@ func (r *RemoveDuplicates) Balance(ctx context.Context, nodes []*v1.Node) *frame
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -299,7 +299,7 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := New(&RemoveDuplicatesArgs{
|
||||
plugin, err := New(ctx, &RemoveDuplicatesArgs{
|
||||
ExcludeOwnerKinds: testCase.excludeOwnerKinds,
|
||||
},
|
||||
handle,
|
||||
@@ -702,7 +702,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := New(&RemoveDuplicatesArgs{},
|
||||
plugin, err := New(ctx, &RemoveDuplicatesArgs{},
|
||||
handle,
|
||||
)
|
||||
if err != nil {
|
||||
|
||||
@@ -17,14 +17,16 @@ import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
)
|
||||
|
||||
func ValidateRemoveDuplicatesArgs(obj runtime.Object) error {
|
||||
args := obj.(*RemoveDuplicatesArgs)
|
||||
var allErrs []error
|
||||
// At most one of include/exclude can be set
|
||||
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
|
||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
|
||||
}
|
||||
|
||||
return nil
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package removeduplicates
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -11,6 +12,7 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
description string
|
||||
args *RemoveDuplicatesArgs
|
||||
expectError bool
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
description: "valid namespace args, no errors",
|
||||
@@ -20,7 +22,6 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
Include: []string{"default"},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid namespaces args, expects error",
|
||||
@@ -31,17 +32,19 @@ func TestValidateRemovePodsViolatingNodeTaintsArgs(t *testing.T) {
|
||||
Exclude: []string{"kube-system"},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf("only one of Include/Exclude namespaces can be set"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateRemoveDuplicatesArgs(tc.args)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
validateErr := ValidateRemoveDuplicatesArgs(testCase.args)
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ const PluginName = "RemoveFailedPods"
|
||||
|
||||
// RemoveFailedPods evicts pods in failed status phase that match the given args criteria
|
||||
type RemoveFailedPods struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *RemoveFailedPodsArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -44,11 +45,12 @@ type RemoveFailedPods struct {
|
||||
var _ frameworktypes.DeschedulePlugin = &RemoveFailedPods{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
failedPodsArgs, ok := args.(*RemoveFailedPodsArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemoveFailedPodsArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if failedPodsArgs.Namespaces != nil {
|
||||
@@ -71,7 +73,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
|
||||
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
if err := validateCanEvict(pod, failedPodsArgs); err != nil {
|
||||
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
||||
logger.V(4).Info(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -79,6 +81,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
})
|
||||
|
||||
return &RemoveFailedPods{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
podFilter: podFilter,
|
||||
args: failedPodsArgs,
|
||||
@@ -92,8 +95,9 @@ func (d *RemoveFailedPods) Name() string {
|
||||
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
@@ -114,7 +118,7 @@ func (d *RemoveFailedPods) Deschedule(ctx context.Context, nodes []*v1.Node) *fr
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -362,7 +362,7 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
t.Fatalf("Unable to initialize a framework handle: %v", err)
|
||||
}
|
||||
|
||||
plugin, err := New(&RemoveFailedPodsArgs{
|
||||
plugin, err := New(ctx, &RemoveFailedPodsArgs{
|
||||
Reasons: tc.args.Reasons,
|
||||
ExitCodes: tc.args.ExitCodes,
|
||||
MinPodLifetimeSeconds: tc.args.MinPodLifetimeSeconds,
|
||||
|
||||
@@ -18,21 +18,23 @@ import (
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
)
|
||||
|
||||
// ValidateRemoveFailedPodsArgs validates RemoveFailedPods arguments
|
||||
func ValidateRemoveFailedPodsArgs(obj runtime.Object) error {
|
||||
args := obj.(*RemoveFailedPodsArgs)
|
||||
var allErrs []error
|
||||
// At most one of include/exclude can be set
|
||||
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
|
||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
|
||||
}
|
||||
|
||||
if args.LabelSelector != nil {
|
||||
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
|
||||
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package removefailedpods
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -12,7 +13,7 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
args *RemoveFailedPodsArgs
|
||||
expectError bool
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
description: "valid namespace args, no errors",
|
||||
@@ -24,7 +25,6 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
|
||||
Reasons: []string{"ReasonDoesNotMatch"},
|
||||
MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds,
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid namespaces args, expects error",
|
||||
@@ -34,7 +34,7 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
|
||||
Exclude: []string{"kube-system"},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf(`only one of Include/Exclude namespaces can be set`),
|
||||
},
|
||||
{
|
||||
description: "valid label selector args, no errors",
|
||||
@@ -43,7 +43,6 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
|
||||
MatchLabels: map[string]string{"role.kubernetes.io/node": ""},
|
||||
},
|
||||
},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
description: "invalid label selector args, expects errors",
|
||||
@@ -56,16 +55,19 @@ func TestValidateRemoveFailedPodsArgs(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectError: true,
|
||||
errInfo: fmt.Errorf(`failed to get label selectors from strategy's params: [key: Invalid value: "": name part must be non-empty; name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), values: Invalid value: null: for 'in', 'notin' operators, values set can't be empty]`),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
err := ValidateRemoveFailedPodsArgs(tc.args)
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected arg validation behavior")
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
validateErr := ValidateRemoveFailedPodsArgs(testCase.args)
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ package removepodshavingtoomanyrestarts
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -36,6 +37,7 @@ const PluginName = "RemovePodsHavingTooManyRestarts"
|
||||
// There are too many cases leading this issue: Volume mount failed, app error due to nodes' different settings.
|
||||
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods and pods with local storages.
|
||||
type RemovePodsHavingTooManyRestarts struct {
|
||||
logger klog.Logger
|
||||
handle frameworktypes.Handle
|
||||
args *RemovePodsHavingTooManyRestartsArgs
|
||||
podFilter podutil.FilterFunc
|
||||
@@ -44,11 +46,12 @@ type RemovePodsHavingTooManyRestarts struct {
|
||||
var _ frameworktypes.DeschedulePlugin = &RemovePodsHavingTooManyRestarts{}
|
||||
|
||||
// New builds plugin from its arguments while passing a handle
|
||||
func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
func New(ctx context.Context, args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
tooManyRestartsArgs, ok := args.(*RemovePodsHavingTooManyRestartsArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type RemovePodsHavingTooManyRestartsArgs, got %T", args)
|
||||
}
|
||||
logger := klog.FromContext(ctx).WithValues("plugin", PluginName)
|
||||
|
||||
var includedNamespaces, excludedNamespaces sets.Set[string]
|
||||
if tooManyRestartsArgs.Namespaces != nil {
|
||||
@@ -69,7 +72,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
|
||||
podFilter = podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
if err := validateCanEvict(pod, tooManyRestartsArgs); err != nil {
|
||||
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
||||
logger.V(4).Info(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
|
||||
return false
|
||||
}
|
||||
return true
|
||||
@@ -99,6 +102,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
|
||||
}
|
||||
|
||||
return &RemovePodsHavingTooManyRestarts{
|
||||
logger: logger,
|
||||
handle: handle,
|
||||
args: tooManyRestartsArgs,
|
||||
podFilter: podFilter,
|
||||
@@ -112,8 +116,9 @@ func (d *RemovePodsHavingTooManyRestarts) Name() string {
|
||||
|
||||
// Deschedule extension point implementation for the plugin
|
||||
func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", frameworktypes.DescheduleExtensionPoint)
|
||||
for _, node := range nodes {
|
||||
klog.V(2).InfoS("Processing node", "node", klog.KObj(node))
|
||||
logger.V(2).Info("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, d.handle.GetPodsAssignedToNodeFunc(), d.podFilter)
|
||||
if err != nil {
|
||||
// no pods evicted as error encountered retrieving evictable Pods
|
||||
@@ -121,6 +126,15 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
|
||||
Err: fmt.Errorf("error listing pods on a node: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
podRestarts := make(map[*v1.Pod]int32)
|
||||
for _, pod := range pods {
|
||||
podRestarts[pod] = getPodTotalRestarts(pod, d.args.IncludingInitContainers)
|
||||
}
|
||||
// sort pods by restarts count
|
||||
sort.Slice(pods, func(i, j int) bool {
|
||||
return podRestarts[pods[i]] > podRestarts[pods[j]]
|
||||
})
|
||||
totalPods := len(pods)
|
||||
loop:
|
||||
for i := 0; i < totalPods; i++ {
|
||||
@@ -134,7 +148,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
|
||||
case *evictions.EvictionTotalLimitError:
|
||||
return nil
|
||||
default:
|
||||
klog.Errorf("eviction failed: %v", err)
|
||||
logger.Error(err, "eviction failed")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -145,11 +159,7 @@ func (d *RemovePodsHavingTooManyRestarts) Deschedule(ctx context.Context, nodes
|
||||
func validateCanEvict(pod *v1.Pod, tooManyRestartsArgs *RemovePodsHavingTooManyRestartsArgs) error {
|
||||
var err error
|
||||
|
||||
restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
|
||||
if tooManyRestartsArgs.IncludingInitContainers {
|
||||
restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
|
||||
}
|
||||
|
||||
restarts := getPodTotalRestarts(pod, tooManyRestartsArgs.IncludingInitContainers)
|
||||
if restarts < tooManyRestartsArgs.PodRestartThreshold {
|
||||
err = fmt.Errorf("number of container restarts (%v) not exceeding the threshold", restarts)
|
||||
}
|
||||
@@ -165,3 +175,12 @@ func calcContainerRestartsFromStatuses(statuses []v1.ContainerStatus) int32 {
|
||||
}
|
||||
return restarts
|
||||
}
|
||||
|
||||
// getPodTotalRestarts get total restarts of a pod.
|
||||
func getPodTotalRestarts(pod *v1.Pod, includeInitContainers bool) int32 {
|
||||
restarts := calcContainerRestartsFromStatuses(pod.Status.ContainerStatuses)
|
||||
if includeInitContainers {
|
||||
restarts += calcContainerRestartsFromStatuses(pod.Status.InitContainerStatuses)
|
||||
}
|
||||
return restarts
|
||||
}
|
||||
|
||||
@@ -341,6 +341,7 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
}
|
||||
|
||||
plugin, err := New(
|
||||
ctx,
|
||||
&tc.args,
|
||||
handle)
|
||||
if err != nil {
|
||||
|
||||
@@ -15,29 +15,32 @@ package removepodshavingtoomanyrestarts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// ValidateRemovePodsHavingTooManyRestartsArgs validates RemovePodsHavingTooManyRestarts arguments
|
||||
func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
|
||||
args := obj.(*RemovePodsHavingTooManyRestartsArgs)
|
||||
var allErrs []error
|
||||
// At most one of include/exclude can be set
|
||||
if args.Namespaces != nil && len(args.Namespaces.Include) > 0 && len(args.Namespaces.Exclude) > 0 {
|
||||
return fmt.Errorf("only one of Include/Exclude namespaces can be set")
|
||||
allErrs = append(allErrs, fmt.Errorf("only one of Include/Exclude namespaces can be set"))
|
||||
}
|
||||
|
||||
if args.LabelSelector != nil {
|
||||
if _, err := metav1.LabelSelectorAsSelector(args.LabelSelector); err != nil {
|
||||
return fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
|
||||
allErrs = append(allErrs, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err))
|
||||
}
|
||||
}
|
||||
|
||||
if args.PodRestartThreshold < 1 {
|
||||
return fmt.Errorf("invalid PodsHavingTooManyRestarts threshold")
|
||||
allErrs = append(allErrs, fmt.Errorf("invalid PodsHavingTooManyRestarts threshold"))
|
||||
}
|
||||
|
||||
allowedStates := sets.New(
|
||||
@@ -49,8 +52,10 @@ func ValidateRemovePodsHavingTooManyRestartsArgs(obj runtime.Object) error {
|
||||
)
|
||||
|
||||
if !allowedStates.HasAll(args.States...) {
|
||||
return fmt.Errorf("states must be one of %v", allowedStates.UnsortedList())
|
||||
allowed := allowedStates.UnsortedList()
|
||||
sort.Strings(allowed)
|
||||
allErrs = append(allErrs, fmt.Errorf("states must be one of %v", allowed))
|
||||
}
|
||||
|
||||
return nil
|
||||
return utilerrors.NewAggregate(allErrs)
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user