mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 21:31:18 +01:00

Compare commits


8 Commits

Author | SHA1 | Message | Date
Jan Chaloupka | b56794708d | descheduler: wire the metrics collector with the framework handle | 2024-11-05 21:13:27 +01:00
Jan Chaloupka | b7b352780e | LowNodeUtilization: test metrics based utilization | 2024-11-05 21:11:33 +01:00
Jan Chaloupka | 646a383b37 | Get pod usage from the usage client | 2024-11-05 14:07:59 +01:00
Jan Chaloupka | ad18f41b66 | Update actualUsageClient | 2024-11-04 18:11:27 +01:00
Jan Chaloupka | 80f9c0ada6 | Separate usage client into a new file | 2024-10-21 22:38:25 +02:00
Jan Chaloupka | 3174107718 | usageSnapshot -> requestedUsageClient | 2024-10-21 22:22:38 +02:00
Jan Chaloupka | 1f55c4d680 | node utilization: abstract pod utilization retriever | 2024-10-15 12:18:37 +02:00
Jan Chaloupka | dc9bea3ede | nodeutiliation: create a usage snapshot | 2024-10-15 12:18:30 +02:00
3065 changed files with 76189 additions and 233341 deletions


@@ -7,22 +7,20 @@ jobs:
   deploy:
     strategy:
       matrix:
-        k8s-version: ["v1.33.0"]
-        descheduler-version: ["v0.33.0"]
+        k8s-version: ["v1.31.0"]
+        descheduler-version: ["v0.31.0"]
         descheduler-api: ["v1alpha2"]
         manifest: ["deployment"]
-        kind-version: ["v0.27.0"] # keep in sync with test/run-e2e-tests.sh
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
         uses: actions/checkout@v4
       - name: Create kind cluster
-        uses: helm/kind-action@v1.12.0
+        uses: helm/kind-action@v1.10.0
         with:
           node_image: kindest/node:${{ matrix.k8s-version }}
           kubectl_version: ${{ matrix.k8s-version }}
           config: test/kind-config.yaml
-          version: ${{ matrix.kind-version }}
       - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod


@@ -1,5 +1,5 @@
 run:
-  timeout: 5m
+  timeout: 2m
 linters:
   disable-all: true


@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.24.2
+FROM golang:1.22.5
 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
@@ -21,7 +21,7 @@ RUN VERSION=${VERSION} make build.$ARCH
 FROM scratch
-MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
+MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
 LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler


@@ -13,7 +13,7 @@
 # limitations under the License.
 FROM scratch
-MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
+MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
 USER 1000


@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64
 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
-GOLANGCI_VERSION := v1.64.8
+GOLANGCI_VERSION := v1.61.0
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
 GOFUMPT_VERSION := v0.7.0
@@ -148,7 +148,7 @@ lint:
 ifndef HAS_GOLANGCI
 	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
 endif
-	./_output/bin/golangci-lint run -v
+	./_output/bin/golangci-lint run
 fmt:
 ifndef HAS_GOFUMPT

OWNERS

@@ -13,8 +13,6 @@ reviewers:
 - janeliul
 - knelasevero
 - jklaw90
-- googs1025
-- ricardomaraschini
 emeritus_approvers:
 - aveshagarwal
 - k82cn

README.md

@@ -33,15 +33,15 @@ but relies on the default scheduler for that.
 ## ⚠️ Documentation Versions by Release
 If you are using a published release of Descheduler (such as
-`registry.k8s.io/descheduler/descheduler:v0.33.0`), follow the documentation in
+`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
 that version's release branch, as listed below:
 |Descheduler Version|Docs link|
 |---|---|
-|v0.33.x|[`release-1.33`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.33/README.md)|
-|v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
 |v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
 |v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
+|v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|
+|v0.28.x|[`release-1.28`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.28/README.md)|
 The
 [`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
@@ -93,17 +93,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.31' | kubectl apply -f -
 ```
 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.31' | kubectl apply -f -
 ```
 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.31' | kubectl apply -f -
 ```
 ## User Guide
@@ -118,50 +118,29 @@ The Descheduler Policy is configurable and includes default strategy plugins tha
 These are top level keys in the Descheduler Policy that you can use to configure all evictions.
-| Name | type | Default Value | Description |
-|------------------------------------|----------|---------------|----------------------------------------------------------------------------------------------------------------------------|
-| `nodeSelector` | `string` | `nil` | Limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point. |
-| `maxNoOfPodsToEvictPerNode` | `int` | `nil` | Maximum number of pods evicted from each node (summed through all strategies). |
-| `maxNoOfPodsToEvictPerNamespace` | `int` | `nil` | Maximum number of pods evicted from each namespace (summed through all strategies). |
-| `maxNoOfPodsToEvictTotal` | `int` | `nil` | Maximum number of pods evicted per rescheduling cycle (summed through all strategies). |
-| `metricsCollector` (deprecated) | `object` | `nil` | Configures collection of metrics for actual resource utilization. |
-| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
-| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) |
-| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
-| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
-| `prometheus` |`object`| `nil` | Configures collection of Prometheus metrics for actual resource utilization |
-| `prometheus.url` |`string`| `nil` | Points to a Prometheus server url |
-| `prometheus.authToken` |`object`| `nil` | Sets Prometheus server authentication token. If not specified in cluster authentication token from the container's file system is read. |
-| `prometheus.authToken.secretReference` |`object`| `nil` | Read the authentication token from a kubernetes secret (the secret is expected to contain the token under `prometheusAuthToken` data key) |
-| `prometheus.authToken.secretReference.namespace` |`string`| `nil` | Authentication token kubernetes secret namespace (currently, the RBAC configuration permits retrieving secrets from the `kube-system` namespace. If the secret needs to be accessed from a different namespace, the existing RBAC rules must be explicitly extended. |
-| `prometheus.authToken.secretReference.name` |`string`| `nil` | Authentication token kubernetes secret name |
-The descheduler currently allows to configure a metric collection of Kubernetes Metrics through `metricsProviders` field.
-The previous way of setting `metricsCollector` field is deprecated. There are currently two sources to configure:
-- `KubernetesMetrics`: enables metrics collection from Kubernetes Metrics server
-- `Prometheus`: enables metrics collection from Prometheus server
-In general, each plugin can consume metrics from a different provider so multiple distinct providers can be configured in parallel.
+| Name |type| Default Value | Description |
+|------|----|---------------|-------------|
+| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
+| `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
+| `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
+| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |

 ### Evictor Plugin configuration (Default Evictor)

 The Default Evictor Plugin is used by default for filtering pods before processing them in an strategy plugin, or for applying a PreEvictionFilter of pods before eviction. You can also create your own Evictor Plugin or use the Default one provided by Descheduler. Other uses for the Evictor plugin can be to sort, filter, validate or group pods by different criteria, and that's why this is handled by a plugin and not configured in the top level config.

 | Name |type| Default Value | Description |
-|---------------------------|----|---------------|-----------------------------------------------------------------------------------------------------------------------------|
+|------|----|---------------|-------------|
 | `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
 | `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
-| `evictDaemonSetPods` | bool | false | allows eviction of DaemonSet managed Pods. |
 | `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
 | `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
 | `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
-| `labelSelector` |`metav1.LabelSelector`|| (see [label filtering](#label-filtering)) |
-| `priorityThreshold` |`priorityThreshold`|| (see [priority filtering](#priority-filtering)) |
-| `nodeFit` |`bool`|`false`| (see [node fit filtering](#node-fit-filtering)) |
-| `minReplicas` |`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
-| `minPodAge` |`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |
-| `ignorePodsWithoutPDB` |`bool`|`false`| set whether pods without PodDisruptionBudget should be evicted or ignored |
+|`labelSelector`|`metav1.LabelSelector`||(see [label filtering](#label-filtering))|
+|`priorityThreshold`|`priorityThreshold`||(see [priority filtering](#priority-filtering))|
+|`nodeFit`|`bool`|`false`|(see [node fit filtering](#node-fit-filtering))|
+|`minReplicas`|`uint`|`0`| ignore eviction of pods where owner (e.g. `ReplicaSet`) replicas is below this threshold |
+|`minPodAge`|`metav1.Duration`|`0`| ignore eviction of pods with a creation time within this threshold |

 ### Example policy
@@ -178,16 +157,6 @@ nodeSelector: "node=node1" # you don't need to set this, if not set all will be
 maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
 maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
 maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
-gracePeriodSeconds: 60 # you don't need to set this, 0 if not set
-# you don't need to set this, metrics are not collected if not set
-metricsProviders:
-- source: Prometheus
-  prometheus:
-    url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
-    authToken:
-      secretReference:
-        namespace: "kube-system"
-        name: "authtoken"
 profiles:
   - name: ProfileName
     pluginConfig:
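The removed example above wires up a single Prometheus provider; the deleted prose in the top-level configuration section also states that each plugin can consume metrics from a different provider, so multiple providers may be configured side by side. A minimal sketch of such a policy, not taken from the diff itself but assembled only from field names that appear in the removed lines (the exact schema should be checked against the v1alpha2 API of the release in use):

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
# Both sources enabled in parallel; individual plugins then pick the one
# they read from (e.g. via metricsUtilization.source in LowNodeUtilization).
metricsProviders:
- source: KubernetesMetrics
- source: Prometheus
  prometheus:
    url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
    authToken:
      secretReference:
        namespace: "kube-system"
        name: "authtoken"
```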
@@ -307,18 +276,11 @@ If that parameter is set to `true`, the thresholds are considered as percentage
 `thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
 A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
-**NOTE:** By default node resource consumption is determined by the requests and limits of pods, not actual usage.
+**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
 This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
 design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
 like `kubectl top`) may differ from the calculated consumption, due to these components reporting
-actual usage metrics. Metrics-based descheduling can be enabled by setting `metricsUtilization.metricsServer` field (deprecated)
-or `metricsUtilization.source` field to `KubernetesMetrics`.
-In order to have the plugin consume the metrics the metric provider needs to be configured as well.
-Alternatively, it is possible to create a prometheus client and configure a prometheus query to consume
-metrics outside of the kubernetes metrics server. The query is expected to return a vector of values for
-each node. The values are expected to be any real number within <0; 1> interval. During eviction only
-a single pod is evicted at most from each overutilized node. There's currently no support for evicting more.
-See `metricsProviders` field at [Top Level configuration](#top-level-configuration) for available options.
+actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.

 **Parameters:**
@@ -328,13 +290,7 @@ See `metricsProviders` field at [Top Level configuration](#top-level-configurati
 |`thresholds`|map(string:int)|
 |`targetThresholds`|map(string:int)|
 |`numberOfNodes`|int|
-|`evictionLimits`|object|
 |`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
-|`metricsUtilization`|object|
-|`metricsUtilization.metricsServer` (deprecated)|bool|
-|`metricsUtilization.source`|string|
-|`metricsUtilization.prometheus.query`|string|

 **Example:**
@@ -354,12 +310,6 @@ profiles:
           "cpu" : 50
           "memory": 50
           "pods": 50
-        # metricsUtilization:
-        #   source: Prometheus
-        #   prometheus:
-        #     query: instance:node_cpu:rate:sum
-        evictionLimits:
-          node: 5
     plugins:
       balance:
         enabled:
@@ -375,12 +325,10 @@ and will not be used to compute node's usage if it's not specified in `threshold
 * The valid range of the resource's percentage value is \[0, 100\]
 * Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
-There are two more parameters associated with the `LowNodeUtilization` strategy, called `numberOfNodes` and `evictionLimits`.
-The first parameter can be configured to activate the strategy only when the number of under utilized nodes
+There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
+This parameter can be configured to activate the strategy only when the number of under utilized nodes
 are above the configured value. This could be helpful in large clusters where a few nodes could go
 under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
-The second parameter is useful when a number of evictions per the plugin per a descheduling cycle needs to be limited.
-The parameter currently enables to limit the number of evictions per node through `node` field.

 ### HighNodeUtilization
@@ -406,12 +354,6 @@ strategy evicts pods from `underutilized nodes` (those with usage below `thresho
 so that they can be recreated in appropriately utilized nodes.
 The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.
-To control pod eviction from underutilized nodes, use the `evictionModes`
-array. A lenient policy, which evicts pods regardless of their resource
-requests, is the default. To enable a stricter policy that only evicts pods
-with resource requests defined for the provided threshold resources, add the
-option `OnlyThresholdingResources` to the `evictionModes` configuration.
 **NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
 This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
 design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
@@ -424,15 +366,8 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
 |---|---|
 |`thresholds`|map(string:int)|
 |`numberOfNodes`|int|
-|`evictionModes`|list(string)|
 |`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
-**Supported Eviction Modes:**
-|Name|Description|
-|---|---|
-|`OnlyThresholdingResources`|Evict only pods that have resource requests defined for the provided threshold resources.|

 **Example:**
 ```yaml
@@ -451,8 +386,6 @@ profiles:
           exclude:
           - "kube-system"
           - "namespace1"
-        evictionModes:
-        - "OnlyThresholdingResources"
     plugins:
       balance:
         enabled:
@@ -925,7 +858,7 @@ does not exist, descheduler won't create it and will throw an error.
 ### Label filtering

-The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#labelselector-v1-meta)
+The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)
 to filter pods by their labels:

 * `PodLifeTime`
@@ -1045,12 +978,10 @@ To get best results from HA mode some additional configurations might require:
 ## Metrics

 | name | type | description |
-|---------------------------------------|--------------|-----------------------------------------------------------------------------------|
+|-------|-------|----------------|
 | build_info | gauge | constant 1 |
 | pods_evicted | CounterVec | total number of pods evicted |
-| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (support _bucket, _sum, _count) |
-| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each stragtegy of descheduling operation (support _bucket, _sum, _count) |

 The metrics are served through https://localhost:10258/metrics by default.
 The address and port can be changed by setting `--binding-address` and `--secure-port` flags.
@@ -1066,12 +997,6 @@ packages that it is compiled with.
 | Descheduler | Supported Kubernetes Version |
 |-------------|------------------------------|
-| v0.33 | v1.33 |
-| v0.32 | v1.32 |
-| v0.31 | v1.31 |
-| v0.30 | v1.30 |
-| v0.29 | v1.29 |
-| v0.28 | v1.28 |
 | v0.27 | v1.27 |
 | v0.26 | v1.26 |
 | v0.25 | v1.25 |


@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.33.0
-appVersion: 0.33.0
+version: 0.31.0
+appVersion: 0.31.0
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes
@@ -13,4 +13,4 @@ sources:
 - https://github.com/kubernetes-sigs/descheduler
 maintainers:
 - name: Kubernetes SIG Scheduling
-  email: sig-scheduling@kubernetes.io
+  email: kubernetes-sig-scheduling@googlegroups.com


@@ -11,7 +11,7 @@ helm install my-release --namespace kube-system descheduler/descheduler
 ## Introduction

-This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview what changes descheduler would make without actually going forward with the changes, you can install descheduler in dry run mode by providing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.
+This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.

 ## Prerequisites
@@ -64,6 +64,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
 | `replicas` | The replica count for Deployment | `1` |
 | `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
 | `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
+| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
 | `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
 | `rbac.create` | If `true`, create & use RBAC resources | `true` |
 | `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |


@@ -10,13 +10,3 @@ WARNING: You enabled DryRun mode, you can't use Leader Election.
 {{- end}}
 {{- end}}
 {{- end}}
-{{- if .Values.deschedulerPolicy }}
-A DeschedulerPolicy has been applied for you. You can view the policy with:
-kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml
-If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
-To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.
-deschedulerPolicy: {}
-{{- end }}


@@ -24,9 +24,6 @@ rules:
 - apiGroups: ["scheduling.k8s.io"]
   resources: ["priorityclasses"]
   verbs: ["get", "watch", "list"]
-- apiGroups: ["policy"]
-  resources: ["poddisruptionbudgets"]
-  verbs: ["get", "watch", "list"]
 {{- if .Values.leaderElection.enabled }}
 - apiGroups: ["coordination.k8s.io"]
   resources: ["leases"]
@@ -36,13 +33,4 @@ rules:
   resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
   verbs: ["get", "patch", "delete"]
 {{- end }}
-{{- if and .Values.deschedulerPolicy }}
-{{- range .Values.deschedulerPolicy.metricsProviders }}
-{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
-- apiGroups: ["metrics.k8s.io"]
-  resources: ["pods", "nodes"]
-  verbs: ["get", "list"]
-{{- end }}
-{{- end }}
-{{- end }}
 {{- end -}}
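The removed template block only emits the `metrics.k8s.io` rule when at least one entry in `deschedulerPolicy.metricsProviders` has `source: KubernetesMetrics`. A sketch, using only names that appear in the removed lines, of the values that trigger it and the rule it would render:

```yaml
# values.yaml (sketch)
deschedulerPolicy:
  metricsProviders:
  - source: KubernetesMetrics

# rule rendered into the ClusterRole by the removed block:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods", "nodes"]
  verbs: ["get", "list"]
```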


@@ -15,10 +15,10 @@ spec:
   {{- if .Values.startingDeadlineSeconds }}
   startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
   {{- end }}
-  {{- if ne .Values.successfulJobsHistoryLimit nil }}
+  {{- if .Values.successfulJobsHistoryLimit }}
   successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
   {{- end }}
-  {{- if ne .Values.failedJobsHistoryLimit nil }}
+  {{- if .Values.failedJobsHistoryLimit }}
   failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
   {{- end }}
   {{- if .Values.timeZone }}
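The two conditionals above differ when the history limits are set to zero: Go-template truthiness treats `0` as false, so the older `{{ if .Values.successfulJobsHistoryLimit }}` form drops an explicit zero, while the `ne ... nil` form only skips the field when it is unset. A small values sketch where the two templates diverge:

```yaml
# With "if ne .Values.successfulJobsHistoryLimit nil" an explicit zero is
# rendered and no completed Jobs are kept.
# With the plain truthiness check, zero is treated as "unset", the field is
# omitted, and the CronJob falls back to the Kubernetes default (3).
successfulJobsHistoryLimit: 0
failedJobsHistoryLimit: 1
```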
@@ -89,8 +89,6 @@ spec:
             {{- end }}
             livenessProbe:
               {{- toYaml .Values.livenessProbe | nindent 16 }}
-            ports:
-              {{- toYaml .Values.ports | nindent 16 }}
             resources:
               {{- toYaml .Values.resources | nindent 16 }}
             {{- if .Values.securityContext }}


@@ -59,11 +59,10 @@ spec:
             - {{ printf "--%s" $key }}
             {{- end }}
             {{- end }}
-            {{- if .Values.leaderElection.enabled }}
             {{- include "descheduler.leaderElection" . | nindent 12 }}
-            {{- end }}
             ports:
-              {{- toYaml .Values.ports | nindent 12 }}
+              - containerPort: 10258
+                protocol: TCP
             livenessProbe:
               {{- toYaml .Values.livenessProbe | nindent 12 }}
             resources:


@@ -18,13 +18,9 @@ resources:
   requests:
     cpu: 500m
     memory: 256Mi
-  limits:
-    cpu: 500m
-    memory: 256Mi
-ports:
-  - containerPort: 10258
-    protocol: TCP
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi

 securityContext:
   allowPrivilegeEscalation: false
allowPrivilegeEscalation: false allowPrivilegeEscalation: false
@@ -89,12 +85,16 @@ cmdOptions:
 deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

 # deschedulerPolicy contains the policies the descheduler will execute.
-# To use policies stored in an existing configMap use:
-# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
-# deschedulerPolicy: {}
 deschedulerPolicy:
   # nodeSelector: "key1=value1,key2=value2"
   # maxNoOfPodsToEvictPerNode: 10
   # maxNoOfPodsToEvictPerNamespace: 10
-  # metricsProviders:
-  # - source: KubernetesMetrics
+  # ignorePvcPods: true
+  # evictLocalStoragePods: true
+  # evictDaemonSetPods: true
   # tracing:
   #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
   #   transportCert: ""
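The comments deleted at the top of the `deschedulerPolicy` block describe how to hand policy management over to an out-of-band ConfigMap. A sketch of that usage, based only on those removed comments (the ConfigMap must be named after the chart's `descheduler.fullname` template so the pod mounts it):

```yaml
# values.yaml override (sketch): stop rendering the chart-managed policy and
# supply your own ConfigMap named {{ template "descheduler.fullname" . }}.
deschedulerPolicy: {}
```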


@@ -18,29 +18,19 @@ limitations under the License.
 package options

 import (
-    "strings"
     "time"

-    promapi "github.com/prometheus/client_golang/api"
     "github.com/spf13/pflag"

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    apiserver "k8s.io/apiserver/pkg/server"
     apiserveroptions "k8s.io/apiserver/pkg/server/options"
     clientset "k8s.io/client-go/kubernetes"
-    restclient "k8s.io/client-go/rest"
-    cliflag "k8s.io/component-base/cli/flag"
     componentbaseconfig "k8s.io/component-base/config"
     componentbaseoptions "k8s.io/component-base/config/options"
-    "k8s.io/component-base/featuregate"
-    "k8s.io/klog/v2"
     metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"

     "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
     "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
     deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
-    "sigs.k8s.io/descheduler/pkg/features"
     "sigs.k8s.io/descheduler/pkg/tracing"
 )
@@ -52,18 +42,12 @@ const (
 type DeschedulerServer struct {
     componentconfig.DeschedulerConfiguration

     Client clientset.Interface
     EventClient clientset.Interface
     MetricsClient metricsclient.Interface
-    PrometheusClient promapi.Client
-    SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
-    SecureServingInfo *apiserver.SecureServingInfo
-    DisableMetrics bool
-    EnableHTTP2 bool
-    // FeatureGates enabled by the user
-    FeatureGates map[string]bool
-    // DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
-    DefaultFeatureGates featuregate.FeatureGate
+    SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
+    DisableMetrics bool
+    EnableHTTP2 bool
 }

 // NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -121,31 +105,8 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
     fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
     fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
     fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
-    fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
-        "Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))

     componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)

     rs.SecureServing.AddFlags(fs)
 }
-
-func (rs *DeschedulerServer) Apply() error {
-    err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
-    if err != nil {
-        return err
-    }
-    rs.DefaultFeatureGates = features.DefaultMutableFeatureGate
-
-    // loopbackClientConfig is a config for a privileged loopback connection
-    var loopbackClientConfig *restclient.Config
-    var secureServing *apiserver.SecureServingInfo
-    if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
-        klog.ErrorS(err, "failed to apply secure server configuration")
-        return err
-    }
-
-    secureServing.DisableHTTP2 = !rs.EnableHTTP2
-    rs.SecureServingInfo = secureServing
-
-    return nil
-}


@@ -23,16 +23,19 @@ import (
     "os/signal"
     "syscall"

-    "github.com/spf13/cobra"
+    "k8s.io/apiserver/pkg/server/healthz"

     "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
     "sigs.k8s.io/descheduler/pkg/descheduler"
     "sigs.k8s.io/descheduler/pkg/tracing"

+    "github.com/spf13/cobra"
+
     "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/apiserver/pkg/server/healthz"
+    apiserver "k8s.io/apiserver/pkg/server"
     "k8s.io/apiserver/pkg/server/mux"
+    restclient "k8s.io/client-go/rest"
     "k8s.io/component-base/featuregate"
     "k8s.io/component-base/logs"
     logsapi "k8s.io/component-base/logs/api/v1"
@@ -64,16 +67,40 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
             return nil
         },
         RunE: func(cmd *cobra.Command, args []string) error {
-            if err = s.Apply(); err != nil {
-                klog.ErrorS(err, "failed to apply")
+            // loopbackClientConfig is a config for a privileged loopback connection
+            var loopbackClientConfig *restclient.Config
+            var secureServing *apiserver.SecureServingInfo
+            if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
+                klog.ErrorS(err, "failed to apply secure server configuration")
                 return err
             }
-            if err = Run(cmd.Context(), s); err != nil {
-                klog.ErrorS(err, "failed to run descheduler server")
+            secureServing.DisableHTTP2 = !s.EnableHTTP2
+            ctx, done := signal.NotifyContext(cmd.Context(), syscall.SIGINT, syscall.SIGTERM)
+            pathRecorderMux := mux.NewPathRecorderMux("descheduler")
+            if !s.DisableMetrics {
+                pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
+            }
+            healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
+            stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
+            if err != nil {
+                klog.Fatalf("failed to start secure server: %v", err)
                 return err
             }
+            if err = Run(ctx, s); err != nil {
+                klog.ErrorS(err, "descheduler server")
+                return err
+            }
+            done()
+            // wait for metrics server to close
+            <-stoppedCh
             return nil
         },
     }
@@ -87,23 +114,8 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
     return cmd
 }

-func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
-    ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)
-    pathRecorderMux := mux.NewPathRecorderMux("descheduler")
-    if !rs.DisableMetrics {
-        pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
-    }
-    healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
-    stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
-    if err != nil {
-        klog.Fatalf("failed to start secure server: %v", err)
-        return err
-    }
-    err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
+func Run(ctx context.Context, rs *options.DeschedulerServer) error {
+    err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
     if err != nil {
         klog.ErrorS(err, "failed to create tracer provider")
     }
@@ -112,14 +124,5 @@ func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
     // increase the fake watch channel so the dry-run mode can be run
     // over a cluster with thousands of pods
     watch.DefaultChanSize = 100000
-    err = descheduler.Run(ctx, rs)
-    if err != nil {
-        return err
-    }
-    done()
-    // wait for metrics server to close
-    <-stoppedCh
-    return nil
+    return descheduler.Run(ctx, rs)
 }


@@ -23,17 +23,13 @@ descheduler [flags]
 --disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
 --dry-run Execute descheduler in dry run mode.
 --enable-http2 If http/2 should be enabled for the metrics and health check
---feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
-  AllAlpha=true|false (ALPHA - default=false)
-  AllBeta=true|false (BETA - default=false)
-  EvictionsInBackground=true|false (ALPHA - default=false)
 -h, --help help for descheduler
 --http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
 --kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
 --leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
 --leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
 --leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
---leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
+--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
 --leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
 --leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
 --leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)


@@ -3,7 +3,7 @@
 ## Required Tools

 - [Git](https://git-scm.com/downloads)
-- [Go 1.23+](https://golang.org/dl/)
+- [Go 1.16+](https://golang.org/dl/)
 - [Docker](https://docs.docker.com/install/)
 - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
 - [kind v0.10.0+](https://kind.sigs.k8s.io/)


@@ -26,7 +26,7 @@ When the above pre-release steps are complete and the release is ready to be cut
 3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
 4. Cut release branch from `master`, eg `release-1.24`
 5. Publish release using Github's release process from the git tag you created
-6. Email `sig-scheduling@kubernetes.io` to announce the release
+6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

 **Patch release**
 1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
@@ -34,7 +34,7 @@ When the above pre-release steps are complete and the release is ready to be cut
 3. Merge Helm chart version update to release branch
 4. Perform the image promotion process for the patch version
 5. Publish release using Github's release process from the git tag you created
-6. Email `sig-scheduling@kubernetes.io` to announce the release
+6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

 ### Flowchart


@@ -4,12 +4,12 @@ Starting with descheduler release v0.10.0 container images are available in the
 Descheduler Version | Container Image | Architectures |
 ------------------- |-------------------------------------------------|-------------------------|
-v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
-v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
 v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
+v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |

 Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
 starting with descheduler release v0.20.0 use the below process to download the official descheduler

go.mod

@@ -1,41 +1,32 @@
 module sigs.k8s.io/descheduler

-go 1.24.2
+go 1.22.5

 require (
     github.com/client9/misspell v0.3.4
-    github.com/google/go-cmp v0.7.0
-    github.com/prometheus/client_golang v1.22.0
-    github.com/prometheus/common v0.62.0
+    github.com/google/go-cmp v0.6.0
     github.com/spf13/cobra v1.8.1
     github.com/spf13/pflag v1.0.5
-    go.opentelemetry.io/otel v1.33.0
-    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
-    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
-    go.opentelemetry.io/otel/sdk v1.33.0
-    go.opentelemetry.io/otel/trace v1.33.0
-    google.golang.org/grpc v1.68.1
-    k8s.io/api v0.33.0
-    k8s.io/apimachinery v0.33.0
-    k8s.io/apiserver v0.33.0
-    k8s.io/client-go v0.33.0
-    k8s.io/code-generator v0.33.0
-    k8s.io/component-base v0.33.0
-    k8s.io/component-helpers v0.33.0
+    go.opentelemetry.io/otel v1.28.0
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
+    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
+    go.opentelemetry.io/otel/sdk v1.28.0
+    go.opentelemetry.io/otel/trace v1.28.0
+    google.golang.org/grpc v1.65.0
+    k8s.io/api v0.31.0
+    k8s.io/apimachinery v0.31.0
+    k8s.io/apiserver v0.31.0
+    k8s.io/client-go v0.31.0
+    k8s.io/code-generator v0.31.0
+    k8s.io/component-base v0.31.0
+    k8s.io/component-helpers v0.31.0
     k8s.io/klog/v2 v2.130.1
-    k8s.io/metrics v0.33.0
-    k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
-    kubevirt.io/api v1.3.0
-    kubevirt.io/client-go v1.3.0
-    kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
+    k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
     sigs.k8s.io/mdtoc v1.1.0
     sigs.k8s.io/yaml v1.4.0
 )

-require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
-
 require (
-    cel.dev/expr v0.19.1 // indirect
     github.com/BurntSushi/toml v0.3.1 // indirect
     github.com/NYTimes/gziphandler v1.1.1 // indirect
     github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
@@ -52,82 +43,74 @@ require (
     github.com/felixge/httpsnoop v1.0.4 // indirect
     github.com/fsnotify/fsnotify v1.7.0 // indirect
     github.com/fxamacker/cbor/v2 v2.7.0 // indirect
-    github.com/go-kit/kit v0.13.0 // indirect
-    github.com/go-kit/log v0.2.1 // indirect
-    github.com/go-logfmt/logfmt v0.6.0 // indirect
     github.com/go-logr/logr v1.4.2 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
     github.com/go-logr/zapr v1.3.0 // indirect
-    github.com/go-openapi/jsonpointer v0.21.0 // indirect
+    github.com/go-openapi/jsonpointer v0.19.6 // indirect
     github.com/go-openapi/jsonreference v0.20.2 // indirect
-    github.com/go-openapi/swag v0.23.0 // indirect
+    github.com/go-openapi/swag v0.22.4 // indirect
     github.com/gogo/protobuf v1.3.2 // indirect
-    github.com/golang/glog v1.2.4 // indirect
+    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
     github.com/golang/protobuf v1.5.4 // indirect
     github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
-    github.com/google/btree v1.1.3 // indirect
-    github.com/google/cel-go v0.23.2 // indirect
-    github.com/google/gnostic-models v0.6.9 // indirect
+    github.com/google/cel-go v0.20.1 // indirect
+    github.com/google/gnostic-models v0.6.8 // indirect
     github.com/google/gofuzz v1.2.0 // indirect
     github.com/google/uuid v1.6.0 // indirect
-    github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
     github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-    github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
+    github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+    github.com/imdario/mergo v0.3.6 // indirect
     github.com/inconshreveable/mousetrap v1.1.0 // indirect
     github.com/josharian/intern v1.0.0 // indirect
-    github.com/jpillora/backoff v1.0.0 // indirect
     github.com/json-iterator/go v1.1.12 // indirect
-    github.com/kylelemons/godebug v1.1.0 // indirect
     github.com/mailru/easyjson v0.7.7 // indirect
     github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
     github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
     github.com/modern-go/reflect2 v1.0.2 // indirect
     github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
-    github.com/openshift/custom-resource-status v1.1.2 // indirect
     github.com/pkg/errors v0.9.1 // indirect
+    github.com/prometheus/client_golang v1.19.1 // indirect
     github.com/prometheus/client_model v0.6.1 // indirect
+    github.com/prometheus/common v0.55.0 // indirect
     github.com/prometheus/procfs v0.15.1 // indirect
     github.com/russross/blackfriday/v2 v2.1.0 // indirect
-    github.com/stoewer/go-strcase v1.3.0 // indirect
+    github.com/stoewer/go-strcase v1.2.0 // indirect
     github.com/x448/float16 v0.8.4 // indirect
-    go.etcd.io/etcd/api/v3 v3.5.21 // indirect
-    go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
-    go.etcd.io/etcd/client/v3 v3.5.21 // indirect
-    go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
-    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
-    go.opentelemetry.io/otel/metric v1.33.0 // indirect
-    go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+    go.etcd.io/etcd/api/v3 v3.5.14 // indirect
+    go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
+    go.etcd.io/etcd/client/v3 v3.5.14 // indirect
+    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
+    go.opentelemetry.io/otel/metric v1.28.0 // indirect
+    go.opentelemetry.io/proto/otlp v1.3.1 // indirect
     go.uber.org/multierr v1.11.0 // indirect
-    go.uber.org/zap v1.27.0 // indirect
+    go.uber.org/zap v1.26.0 // indirect
-    golang.org/x/crypto v0.36.0 // indirect
-    golang.org/x/mod v0.21.0 // indirect
-    golang.org/x/net v0.38.0 // indirect
-    golang.org/x/oauth2 v0.27.0 // indirect
-    golang.org/x/sync v0.12.0 // indirect
-    golang.org/x/sys v0.31.0 // indirect
-    golang.org/x/term v0.30.0 // indirect
-    golang.org/x/text v0.23.0 // indirect
-    golang.org/x/time v0.9.0 // indirect
-    golang.org/x/tools v0.26.0 // indirect
-    google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
-    google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
-    google.golang.org/protobuf v1.36.5 // indirect
+    golang.org/x/crypto v0.24.0 // indirect
+    golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
+    golang.org/x/mod v0.17.0 // indirect
+    golang.org/x/net v0.26.0 // indirect
+    golang.org/x/oauth2 v0.21.0 // indirect
+    golang.org/x/sync v0.7.0 // indirect
+    golang.org/x/sys v0.21.0 // indirect
+    golang.org/x/term v0.21.0 // indirect
+    golang.org/x/text v0.16.0 // indirect
+    golang.org/x/time v0.3.0 // indirect
+    golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
+    google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect
+    google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
+    google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+    google.golang.org/protobuf v1.34.2 // indirect
     gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
     gopkg.in/inf.v0 v0.9.1 // indirect
     gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
-    k8s.io/apiextensions-apiserver v0.30.0 // indirect
-    k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
-    k8s.io/kms v0.33.0 // indirect
-    k8s.io/kube-openapi v0.30.0 // indirect
-    kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
-    sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
-    sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+    k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
+    k8s.io/kms v0.31.0 // indirect
+    k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+    sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
+    sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+    sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
) )
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f replace go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0

go.sum (659 lines changed)

@@ -1,13 +1,11 @@
cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
@@ -18,20 +16,12 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
@@ -43,153 +33,83 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo= github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU= github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84=
github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -197,521 +117,212 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4=
github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4=
github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ=
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8=
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg=
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA= go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8= go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M=
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0=
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA=
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk= go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw=
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU= go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok=
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk= go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ=
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ= golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY=
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs= k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk=
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= k8s.io/code-generator v0.31.0 h1:w607nrMi1KeDKB3/F/J4lIoOgAwc+gV9ZKew4XRfMp8=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/code-generator v0.31.0/go.mod h1:84y4w3es8rOJOUUP1rLsIiGlO1JuEaPFXQPA9e/K6U0=
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs=
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo=
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= k8s.io/component-helpers v0.31.0 h1:jyRUKA+GX+q19o81k4x94imjNICn+e6Gzi6T89va1/A=
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= k8s.io/component-helpers v0.31.0/go.mod h1:MrNIvT4iB7wXIseYSWfHUJB/aNUiFvbilp4qDfBQi6s=
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/code-generator v0.33.0 h1:B212FVl6EFqNmlgdOZYWNi77yBv+ed3QgQsMR8YQCw4=
k8s.io/code-generator v0.33.0/go.mod h1:KnJRokGxjvbBQkSJkbVuBbu6z4B0rC7ynkpY5Aw6m9o=
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.33.0 h1:fhQSW/vyaWDhMp0vDuO/sLg2RlGZf4F77beSXcB4/eE= k8s.io/kms v0.31.0 h1:KchILPfB1ZE+ka7223mpU5zeFNkmb45jl7RHnlImUaI=
k8s.io/kms v0.33.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E= k8s.io/kms v0.31.0/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/metrics v0.33.0 h1:sKe5sC9qb1RakMhs8LWYNuN2ne6OTCWexj8Jos3rO2Y= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/metrics v0.33.0/go.mod h1:XewckTFXmE2AJiP7PT3EXaY7hi7bler3t2ZLyOdQYzU= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
kubevirt.io/client-go v1.3.0/go.mod h1:qmcJZvUjbmggY1pp7irO3zesBJj7wwGIWAdnYEoh3yc=
kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPxoegcKjkG9tYboFBs/U=
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo= sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w= sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@@ -19,7 +19,7 @@
go::verify_version() { go::verify_version() {
GO_VERSION=($(go version)) GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.20|go1.21|go1.22') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt." echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1 exit 1
fi fi

View File

@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
ret=1 ret=1
fi fi
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
echo "Your vendored results are different:" >&2 echo "Your vendored results are different:" >&2
echo "${_out}" >&2 echo "${_out}" >&2
echo "Vendor Verify failed." >&2 echo "Vendor Verify failed." >&2

View File

@@ -22,9 +22,6 @@ rules:
- apiGroups: ["scheduling.k8s.io"] - apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"] resources: ["priorityclasses"]
verbs: ["get", "watch", "list"] verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"] - apiGroups: ["coordination.k8s.io"]
resources: ["leases"] resources: ["leases"]
verbs: ["create", "update"] verbs: ["create", "update"]
@@ -32,18 +29,6 @@ rules:
resources: ["leases"] resources: ["leases"]
resourceNames: ["descheduler"] resourceNames: ["descheduler"]
verbs: ["get", "patch", "delete"] verbs: ["get", "patch", "delete"]
- apiGroups: ["metrics.k8s.io"]
resources: ["nodes", "pods"]
verbs: ["get", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
--- ---
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
@@ -63,16 +48,3 @@ subjects:
- name: descheduler-sa - name: descheduler-sa
kind: ServiceAccount kind: ServiceAccount
namespace: kube-system namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: descheduler-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: descheduler-role
subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system
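The Role and RoleBinding that appear on only one side of this diff give the descheduler service account read access to Secrets in its own namespace; that is what lets a Prometheus authentication token be referenced from the policy. As a rough illustration (the name and the data key are assumptions, not taken from this diff), such a Secret could look like:

apiVersion: v1
kind: Secret
metadata:
  name: prometheus-auth-token   # illustrative name
  namespace: kube-system        # the descheduler's namespace
type: Opaque
data:
  # key name is an assumption; the value is a base64-encoded bearer token
  prometheusAuthToken: <base64-encoded-token>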

View File

@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
containers: containers:
- name: descheduler - name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0 image: registry.k8s.io/descheduler/descheduler:v0.31.0
volumeMounts: volumeMounts:
- mountPath: /policy-dir - mountPath: /policy-dir
name: policy-volume name: policy-volume

View File

@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa serviceAccountName: descheduler-sa
containers: containers:
- name: descheduler - name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0 image: registry.k8s.io/descheduler/descheduler:v0.31.0
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
command: command:
- "/bin/descheduler" - "/bin/descheduler"

View File

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical priorityClassName: system-cluster-critical
containers: containers:
- name: descheduler - name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.33.0 image: registry.k8s.io/descheduler/descheduler:v0.31.0
volumeMounts: volumeMounts:
- mountPath: /policy-dir - mountPath: /policy-dir
name: policy-volume name: policy-volume

View File

@@ -18,7 +18,6 @@ package api
import ( import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
) )
@@ -43,22 +42,8 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToTotal restricts maximum of pods to be evicted total. // MaxNoOfPodsToTotal restricts maximum of pods to be evicted total.
MaxNoOfPodsToEvictTotal *uint MaxNoOfPodsToEvictTotal *uint
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
// Default is false.
EvictionFailureEventNotification *bool
// MetricsCollector configures collection of metrics about actual resource utilization // MetricsCollector configures collection of metrics about actual resource utilization
// Deprecated. Use MetricsProviders field instead. MetricsCollector MetricsCollector
MetricsCollector *MetricsCollector
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
MetricsProviders []MetricsProvider
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
GracePeriodSeconds *int64
} }
// Namespaces carries a list of included/excluded namespaces // Namespaces carries a list of included/excluded namespaces
@@ -68,12 +53,6 @@ type Namespaces struct {
Exclude []string `json:"exclude,omitempty"` Exclude []string `json:"exclude,omitempty"`
} }
// EvictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
type EvictionLimits struct {
// node restricts the maximum number of evictions per node
Node *uint `json:"node,omitempty"`
}
type ( type (
Percentage float64 Percentage float64
ResourceThresholds map[v1.ResourceName]Percentage ResourceThresholds map[v1.ResourceName]Percentage
@@ -109,54 +88,9 @@ type PluginSet struct {
Disabled []string Disabled []string
} }
type MetricsSource string
const (
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
KubernetesMetrics MetricsSource = "KubernetesMetrics"
	// PrometheusMetrics enables metrics from a Prometheus metrics server.
PrometheusMetrics MetricsSource = "Prometheus"
)
// MetricsCollector configures collection of metrics about actual resource utilization // MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct { type MetricsCollector struct {
// Enabled metrics collection from Kubernetes metrics. // Enabled metrics collection from kubernetes metrics.
// Deprecated. Use MetricsProvider.Source field instead. // Later, the collection can be extended to other providers.
Enabled bool Enabled bool
} }
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
	// Source selects the metrics source, e.g. KubernetesMetrics or Prometheus.
Source MetricsSource
// Prometheus enables metrics collection through Prometheus
Prometheus *Prometheus
}
// ReferencedResourceList is an adaptation of v1.ResourceList with resources as references
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
type Prometheus struct {
URL string
	// authToken is used for authentication with the Prometheus server.
	// If not set, the in-cluster authentication token for the descheduler service
	// account is read from the container's file system.
AuthToken *AuthToken
}
type AuthToken struct {
// secretReference references an authentication token.
// secrets are expected to be created under the descheduler's namespace.
SecretReference *SecretReference
}
// SecretReference holds a reference to a Secret
type SecretReference struct {
// namespace is the namespace of the secret.
Namespace string
// name is the name of the secret.
Name string
}

View File

@@ -41,22 +41,8 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToTotal restricts maximum of pods to be evicted total. // MaxNoOfPodsToTotal restricts maximum of pods to be evicted total.
MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"` MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification. // MetricsCollector configures collection of metrics about actual resource utilization
// Default is false. MetricsCollector MetricsCollector `json:"metricsCollector,omitempty"`
EvictionFailureEventNotification *bool `json:"evictionFailureEventNotification,omitempty"`
// MetricsCollector configures collection of metrics for actual resource utilization
// Deprecated. Use MetricsProviders field instead.
MetricsCollector *MetricsCollector `json:"metricsCollector,omitempty"`
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
MetricsProviders []MetricsProvider `json:"metricsProviders,omitempty"`
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
} }
type DeschedulerProfile struct { type DeschedulerProfile struct {
@@ -84,51 +70,9 @@ type PluginSet struct {
Disabled []string `json:"disabled"` Disabled []string `json:"disabled"`
} }
type MetricsSource string
const (
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
KubernetesMetrics MetricsSource = "KubernetesMetrics"
	// PrometheusMetrics enables metrics from a Prometheus metrics server.
PrometheusMetrics MetricsSource = "Prometheus"
)
// MetricsCollector configures collection of metrics about actual resource utilization // MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct { type MetricsCollector struct {
// Enabled metrics collection from Kubernetes metrics server. // Enabled metrics collection from kubernetes metrics.
// Deprecated. Use MetricsProvider.Source field instead. // Later, the collection can be extended to other providers.
Enabled bool `json:"enabled,omitempty"` Enabled bool
}
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
	// Source selects the metrics source, e.g. KubernetesMetrics or Prometheus.
Source MetricsSource `json:"source,omitempty"`
// Prometheus enables metrics collection through Prometheus
Prometheus *Prometheus `json:"prometheus,omitempty"`
}
type Prometheus struct {
URL string `json:"url,omitempty"`
	// authToken is used for authentication with the Prometheus server.
	// If not set, the in-cluster authentication token for the descheduler service
	// account is read from the container's file system.
AuthToken *AuthToken `json:"authToken,omitempty"`
}
type AuthToken struct {
// secretReference references an authentication token.
// secrets are expected to be created under the descheduler's namespace.
SecretReference *SecretReference `json:"secretReference,omitempty"`
}
// SecretReference holds a reference to a Secret
type SecretReference struct {
// namespace is the namespace of the secret.
Namespace string `json:"namespace,omitempty"`
// name is the name of the secret.
Name string `json:"name,omitempty"`
} }

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2025 The Kubernetes Authors. Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -36,16 +36,6 @@ func init() {
// RegisterConversions adds conversion functions to the given scheme. // RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes. // Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error { func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*AuthToken)(nil), (*api.AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_AuthToken_To_api_AuthToken(a.(*AuthToken), b.(*api.AuthToken), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.AuthToken)(nil), (*AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_AuthToken_To_v1alpha2_AuthToken(a.(*api.AuthToken), b.(*AuthToken), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope) return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
}); err != nil { }); err != nil {
@@ -56,26 +46,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil { }); err != nil {
return err return err
} }
if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*MetricsProvider)(nil), (*api.MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(a.(*MetricsProvider), b.(*api.MetricsProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.MetricsProvider)(nil), (*MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(a.(*api.MetricsProvider), b.(*MetricsProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
}); err != nil {
@@ -101,26 +71,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Prometheus)(nil), (*api.Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Prometheus_To_api_Prometheus(a.(*Prometheus), b.(*api.Prometheus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.Prometheus)(nil), (*Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Prometheus_To_v1alpha2_Prometheus(a.(*api.Prometheus), b.(*Prometheus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*SecretReference)(nil), (*api.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_SecretReference_To_api_SecretReference(a.(*SecretReference), b.(*api.SecretReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.SecretReference)(nil), (*SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_SecretReference_To_v1alpha2_SecretReference(a.(*api.SecretReference), b.(*SecretReference), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
}); err != nil {
@@ -139,26 +89,6 @@ func RegisterConversions(s *runtime.Scheme) error {
return nil
}
func autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
out.SecretReference = (*api.SecretReference)(unsafe.Pointer(in.SecretReference))
return nil
}
// Convert_v1alpha2_AuthToken_To_api_AuthToken is an autogenerated conversion function.
func Convert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
return autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in, out, s)
}
func autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
out.SecretReference = (*SecretReference)(unsafe.Pointer(in.SecretReference))
return nil
}
// Convert_api_AuthToken_To_v1alpha2_AuthToken is an autogenerated conversion function.
func Convert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
return autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in, out, s)
}
func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
@@ -175,10 +105,6 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
out.MetricsCollector = (*api.MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
out.MetricsProviders = *(*[]api.MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
return nil
}
@@ -198,10 +124,6 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
out.MetricsCollector = (*MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
out.MetricsProviders = *(*[]MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
return nil
}
@@ -253,48 +175,6 @@ func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.Desch
return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
}
func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
}
func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
}
func autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
out.Source = api.MetricsSource(in.Source)
out.Prometheus = (*api.Prometheus)(unsafe.Pointer(in.Prometheus))
return nil
}
// Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider is an autogenerated conversion function.
func Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in, out, s)
}
func autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
out.Source = MetricsSource(in.Source)
out.Prometheus = (*Prometheus)(unsafe.Pointer(in.Prometheus))
return nil
}
// Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider is an autogenerated conversion function.
func Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
return autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in, out, s)
}
func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
@@ -391,47 +271,3 @@ func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins,
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
}
func autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
out.URL = in.URL
out.AuthToken = (*api.AuthToken)(unsafe.Pointer(in.AuthToken))
return nil
}
// Convert_v1alpha2_Prometheus_To_api_Prometheus is an autogenerated conversion function.
func Convert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
return autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in, out, s)
}
func autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
out.URL = in.URL
out.AuthToken = (*AuthToken)(unsafe.Pointer(in.AuthToken))
return nil
}
// Convert_api_Prometheus_To_v1alpha2_Prometheus is an autogenerated conversion function.
func Convert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
return autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in, out, s)
}
func autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1alpha2_SecretReference_To_api_SecretReference is an autogenerated conversion function.
func Convert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
return autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in, out, s)
}
func autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_api_SecretReference_To_v1alpha2_SecretReference is an autogenerated conversion function.
func Convert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
return autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in, out, s)
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -25,27 +25,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
*out = *in
if in.SecretReference != nil {
in, out := &in.SecretReference, &out.SecretReference
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
if in == nil {
return nil
}
out := new(AuthToken)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
@@ -77,28 +56,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
if in.MetricsCollector != nil {
in, out := &in.MetricsCollector, &out.MetricsCollector
*out = new(MetricsCollector)
**out = **in
}
if in.MetricsProviders != nil {
in, out := &in.MetricsProviders, &out.MetricsProviders
*out = make([]MetricsProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
@@ -144,43 +101,6 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
*out = *in
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(Prometheus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
if in == nil {
return nil
}
out := new(MetricsProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
*out = *in
@@ -245,40 +165,3 @@ func (in *Plugins) DeepCopy() *Plugins {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
*out = *in
if in.AuthToken != nil {
in, out := &in.AuthToken, &out.AuthToken
*out = new(AuthToken)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
if in == nil {
return nil
}
out := new(Prometheus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -25,27 +25,6 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
*out = *in
if in.SecretReference != nil {
in, out := &in.SecretReference, &out.SecretReference
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
if in == nil {
return nil
}
out := new(AuthToken)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
@@ -77,28 +56,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
if in.MetricsCollector != nil {
in, out := &in.MetricsCollector, &out.MetricsCollector
*out = new(MetricsCollector)
**out = **in
}
if in.MetricsProviders != nil {
in, out := &in.MetricsProviders, &out.MetricsProviders
*out = make([]MetricsProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
@@ -144,64 +101,6 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EvictionLimits) DeepCopyInto(out *EvictionLimits) {
*out = *in
if in.Node != nil {
in, out := &in.Node, &out.Node
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvictionLimits.
func (in *EvictionLimits) DeepCopy() *EvictionLimits {
if in == nil {
return nil
}
out := new(EvictionLimits)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
*out = *in
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(Prometheus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
if in == nil {
return nil
}
out := new(MetricsProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -316,27 +215,6 @@ func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
*out = *in
if in.AuthToken != nil {
in, out := &in.AuthToken, &out.AuthToken
*out = new(AuthToken)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
if in == nil {
return nil
}
out := new(Prometheus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
@@ -358,19 +236,3 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -17,30 +17,18 @@ limitations under the License.
package client
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"time"
promapi "github.com/prometheus/client_golang/api"
clientset "k8s.io/client-go/kubernetes"
"github.com/prometheus/common/config"
componentbaseconfig "k8s.io/component-base/config"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
// Ensure to load all auth plugins.
clientset "k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
componentbaseconfig "k8s.io/component-base/config"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)
var K8sPodCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
var cfg *rest.Config
if len(clientConnection.Kubeconfig) != 0 {
@@ -107,61 +95,3 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
}
return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
}
func loadCAFile(filepath string) (*x509.CertPool, error) {
caCert, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
return nil, fmt.Errorf("failed to append CA certificate to the pool")
}
return caCertPool, nil
}
func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *http.Transport, error) {
// Retrieve Pod CA cert
caCertPool, err := loadCAFile(K8sPodCAFilePath)
if err != nil {
return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
}
// Get Prometheus Host
u, err := url.Parse(prometheusURL)
if err != nil {
return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
}
t := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: &tls.Config{
RootCAs: caCertPool,
ServerName: u.Host,
},
}
roundTripper := transport.NewBearerAuthRoundTripper(
authToken,
t,
)
if authToken != "" {
client, err := promapi.NewClient(promapi.Config{
Address: prometheusURL,
RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(authToken), roundTripper),
})
return client, t, err
}
client, err := promapi.NewClient(promapi.Config{
Address: prometheusURL,
})
return client, t, err
}

View File

@@ -20,37 +20,26 @@ import (
"context"
"fmt"
"math"
"net/http"
"strconv"
"time"
promapi "github.com/prometheus/client_golang/api"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
policyv1 "k8s.io/api/policy/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/workqueue"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
@@ -71,11 +60,6 @@ import (
"sigs.k8s.io/descheduler/pkg/version"
)
const (
prometheusAuthTokenSecretKey = "prometheusAuthToken"
workQueueKey = "key"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
type profileRunner struct {
@@ -84,21 +68,15 @@ type profileRunner struct {
}
type descheduler struct {
rs *options.DeschedulerServer
ir *informerResources
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
namespacedSecretsLister corev1listers.SecretNamespaceLister deschedulerPolicy *api.DeschedulerPolicy
deschedulerPolicy *api.DeschedulerPolicy eventRecorder events.EventRecorder
eventRecorder events.EventRecorder podEvictor *evictions.PodEvictor
podEvictor *evictions.PodEvictor podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) metricsCollector *metricscollector.MetricsCollector
metricsCollector *metricscollector.MetricsCollector
prometheusClient promapi.Client
previousPrometheusClientTransport *http.Transport
queue workqueue.RateLimitingInterface
currentPrometheusAuthToken string
metricsProviders map[api.MetricsSource]*api.MetricsProvider
}
type informerResources struct {
@@ -145,15 +123,7 @@ func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFact
return nil
}
func metricsProviderListToMap(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider { func newDescheduler(rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
for _, provider := range providersList {
providersMap[provider.Source] = &provider
}
return providersMap
}
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory, namespacedSharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
ir := newInformerResources(sharedInformerFactory)
@@ -161,38 +131,32 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
v1.SchemeGroupVersion.WithResource("nodes"),
// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
// consistent with the real runs without having to keep the list here in sync.
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin schedulingv1.SchemeGroupVersion.WithResource("priorityclasses")) // Used by the defaultevictor plugin
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
) // Used by the defaultevictor plugin
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}
podEvictor, err := evictions.NewPodEvictor( podEvictor := evictions.NewPodEvictor(
ctx, nil,
rs.Client,
eventRecorder,
podInformer,
rs.DefaultFeatureGates,
evictions.NewOptions().
WithPolicyGroupVersion(evictionPolicyGroupVersion).
WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
WithGracePeriodSeconds(deschedulerPolicy.GracePeriodSeconds).
WithDryRun(rs.DryRun).
WithMetricsEnabled(!rs.DisableMetrics),
)
if err != nil {
return nil, err var metricsCollector *metricscollector.MetricsCollector
if deschedulerPolicy.MetricsCollector.Enabled {
metricsCollector = metricscollector.NewMetricsCollector(rs.Client, rs.MetricsClient)
} }
desch := &descheduler{ return &descheduler{
rs: rs,
ir: ir,
getPodsAssignedToNode: getPodsAssignedToNode,
@@ -201,148 +165,8 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
eventRecorder: eventRecorder,
podEvictor: podEvictor,
podEvictionReactionFnc: podEvictionReactionFnc,
prometheusClient: rs.PrometheusClient, metricsCollector: metricsCollector,
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}), }, nil
metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
}
if rs.MetricsClient != nil {
nodeSelector := labels.Everything()
if deschedulerPolicy.NodeSelector != nil {
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
if err != nil {
return nil, err
}
nodeSelector = sel
}
desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
}
prometheusProvider := desch.metricsProviders[api.PrometheusMetrics]
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.AuthToken != nil {
authTokenSecret := prometheusProvider.Prometheus.AuthToken.SecretReference
if authTokenSecret == nil || authTokenSecret.Namespace == "" {
return nil, fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
}
if namespacedSharedInformerFactory == nil {
return nil, fmt.Errorf("namespacedSharedInformerFactory not configured")
}
namespacedSharedInformerFactory.Core().V1().Secrets().Informer().AddEventHandler(desch.eventHandler())
desch.namespacedSecretsLister = namespacedSharedInformerFactory.Core().V1().Secrets().Lister().Secrets(authTokenSecret.Namespace)
}
return desch, nil
}
func (d *descheduler) reconcileInClusterSAToken() error {
// Read the sa token and assume it has the sufficient permissions to authenticate
cfg, err := rest.InClusterConfig()
if err == nil {
if d.currentPrometheusAuthToken != cfg.BearerToken {
klog.V(2).Infof("Creating Prometheus client (with SA token)")
prometheusClient, transport, err := client.CreatePrometheusClient(d.metricsProviders[api.PrometheusMetrics].Prometheus.URL, cfg.BearerToken)
if err != nil {
return fmt.Errorf("unable to create a prometheus client: %v", err)
}
d.prometheusClient = prometheusClient
if d.previousPrometheusClientTransport != nil {
d.previousPrometheusClientTransport.CloseIdleConnections()
}
d.previousPrometheusClientTransport = transport
d.currentPrometheusAuthToken = cfg.BearerToken
}
return nil
}
if err == rest.ErrNotInCluster {
return nil
}
return fmt.Errorf("unexpected error when reading in cluster config: %v", err)
}
func (d *descheduler) runAuthenticationSecretReconciler(ctx context.Context) {
defer utilruntime.HandleCrash()
defer d.queue.ShutDown()
klog.Infof("Starting authentication secret reconciler")
defer klog.Infof("Shutting down authentication secret reconciler")
go wait.UntilWithContext(ctx, d.runAuthenticationSecretReconcilerWorker, time.Second)
<-ctx.Done()
}
func (d *descheduler) runAuthenticationSecretReconcilerWorker(ctx context.Context) {
for d.processNextWorkItem(ctx) {
}
}
func (d *descheduler) processNextWorkItem(ctx context.Context) bool {
dsKey, quit := d.queue.Get()
if quit {
return false
}
defer d.queue.Done(dsKey)
err := d.sync()
if err == nil {
d.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
d.queue.AddRateLimited(dsKey)
return true
}
func (d *descheduler) sync() error {
prometheusConfig := d.metricsProviders[api.PrometheusMetrics].Prometheus
if prometheusConfig == nil || prometheusConfig.AuthToken == nil || prometheusConfig.AuthToken.SecretReference == nil {
return fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
}
ns := prometheusConfig.AuthToken.SecretReference.Namespace
name := prometheusConfig.AuthToken.SecretReference.Name
secretObj, err := d.namespacedSecretsLister.Get(name)
if err != nil {
// clear the token if the secret is not found
if apierrors.IsNotFound(err) {
d.currentPrometheusAuthToken = ""
if d.previousPrometheusClientTransport != nil {
d.previousPrometheusClientTransport.CloseIdleConnections()
}
d.previousPrometheusClientTransport = nil
d.prometheusClient = nil
}
return fmt.Errorf("unable to get %v/%v secret", ns, name)
}
authToken := string(secretObj.Data[prometheusAuthTokenSecretKey])
if authToken == "" {
return fmt.Errorf("prometheus authentication token secret missing %q data or empty", prometheusAuthTokenSecretKey)
}
if d.currentPrometheusAuthToken == authToken {
return nil
}
klog.V(2).Infof("authentication secret token updated, recreating prometheus client")
prometheusClient, transport, err := client.CreatePrometheusClient(prometheusConfig.URL, authToken)
if err != nil {
return fmt.Errorf("unable to create a prometheus client: %v", err)
}
d.prometheusClient = prometheusClient
if d.previousPrometheusClientTransport != nil {
d.previousPrometheusClientTransport.CloseIdleConnections()
}
d.previousPrometheusClientTransport = transport
d.currentPrometheusAuthToken = authToken
return nil
}
func (d *descheduler) eventHandler() cache.ResourceEventHandler {
return cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
UpdateFunc: func(old, new interface{}) { d.queue.Add(workQueueKey) },
DeleteFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
}
}
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
@@ -400,7 +224,7 @@ func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node)
d.runProfiles(ctx, client, nodes)
klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests()) klog.V(1).InfoS("Number of evicted pods", "totalEvicted", d.podEvictor.TotalEvicted())
return nil
}
@@ -422,7 +246,6 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
frameworkprofile.WithPodEvictor(d.podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
frameworkprofile.WithMetricsCollector(d.metricsCollector),
frameworkprofile.WithPrometheusClient(d.prometheusClient),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -487,7 +310,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return err
}
if (deschedulerPolicy.MetricsCollector != nil && deschedulerPolicy.MetricsCollector.Enabled) || metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.KubernetesMetrics] != nil { if deschedulerPolicy.MetricsCollector.Enabled {
metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
if err != nil {
return err
@@ -570,14 +393,6 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
}
}
type tokenReconciliation int
const (
noReconciliation tokenReconciliation = iota
inClusterReconciliation
secretReconciliation
)
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
@@ -599,22 +414,7 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()
var namespacedSharedInformerFactory informers.SharedInformerFactory descheduler, err := newDescheduler(rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
metricProviderTokenReconciliation := noReconciliation
prometheusProvider := metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.PrometheusMetrics]
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.URL != "" {
if prometheusProvider.Prometheus.AuthToken != nil {
// Will get reconciled
namespacedSharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields), informers.WithNamespace(prometheusProvider.Prometheus.AuthToken.SecretReference.Namespace))
metricProviderTokenReconciliation = secretReconciliation
} else {
// Use the sa token and assume it has the sufficient permissions to authenticate
metricProviderTokenReconciliation = inClusterReconciliation
}
}
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory, namespacedSharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err
@@ -623,43 +423,15 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
defer cancel()
sharedInformerFactory.Start(ctx.Done())
if metricProviderTokenReconciliation == secretReconciliation {
namespacedSharedInformerFactory.Start(ctx.Done())
}
sharedInformerFactory.WaitForCacheSync(ctx.Done())
descheduler.podEvictor.WaitForEventHandlersSync(ctx)
if metricProviderTokenReconciliation == secretReconciliation {
namespacedSharedInformerFactory.WaitForCacheSync(ctx.Done())
}
if descheduler.metricsCollector != nil { go func() {
go func() { klog.V(2).Infof("Starting metrics collector")
klog.V(2).Infof("Starting metrics collector") descheduler.metricsCollector.Run(ctx)
descheduler.metricsCollector.Run(ctx) klog.V(2).Infof("Stopped metrics collector")
klog.V(2).Infof("Stopped metrics collector") }()
}()
klog.V(2).Infof("Waiting for metrics collector to sync")
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
return descheduler.metricsCollector.HasSynced(), nil
}); err != nil {
return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
}
}
if metricProviderTokenReconciliation == secretReconciliation {
go descheduler.runAuthenticationSecretReconciler(ctx)
}
wait.NonSlidingUntil(func() {
if metricProviderTokenReconciliation == inClusterReconciliation {
// Read the sa token and assume it has the sufficient permissions to authenticate
if err := descheduler.reconcileInClusterSAToken(); err != nil {
klog.ErrorS(err, "unable to reconcile an in cluster SA token")
return
}
}
// A next context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()

View File

@@ -2,35 +2,27 @@ package descheduler
import (
"context"
"errors"
"fmt"
"math/rand"
"net/http"
"testing"
"time"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
apiversion "k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
@@ -41,28 +33,6 @@ import (
"sigs.k8s.io/descheduler/test"
)
var (
podEvictionError = errors.New("PodEvictionError")
tooManyRequestsError = &apierrors.StatusError{
ErrStatus: metav1.Status{
Status: metav1.StatusFailure,
Code: http.StatusTooManyRequests,
Reason: metav1.StatusReasonTooManyRequests,
Message: "admission webhook \"virt-launcher-eviction-interceptor.kubevirt.io\" denied the request: Eviction triggered evacuation of VMI",
},
}
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
)
func initFeatureGates() featuregate.FeatureGate {
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
})
return featureGates
}
func initPluginRegistry() {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
@@ -136,10 +106,6 @@ func removeDuplicatesPolicy() *api.DeschedulerPolicy {
}
func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
var metricsSource api.MetricsSource = ""
if metricsEnabled {
metricsSource = api.KubernetesMetrics
}
return &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{
{
@@ -150,8 +116,8 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
Args: &nodeutilization.LowNodeUtilizationArgs{
Thresholds: thresholds,
TargetThresholds: targetThresholds,
MetricsUtilization: &nodeutilization.MetricsUtilization{ MetricsUtilization: nodeutilization.MetricsUtilization{
Source: metricsSource, MetricsServer: metricsEnabled,
},
},
},
@@ -177,7 +143,7 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
}
}
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) { func initDescheduler(t *testing.T, ctx context.Context, internalDeschedulerPolicy *api.DeschedulerPolicy, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
client := fakeclientset.NewSimpleClientset(objects...)
eventClient := fakeclientset.NewSimpleClientset(objects...)
@@ -187,13 +153,11 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
}
rs.Client = client
rs.EventClient = eventClient
rs.DefaultFeatureGates = featureGates
rs.MetricsClient = metricsClient
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory, nil) descheduler, err := newDescheduler(rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
if err != nil {
eventBroadcaster.Shutdown()
t.Fatalf("Unable to create a descheduler instance: %v", err)
@@ -224,7 +188,6 @@ func TestTaintsUpdated(t *testing.T) {
}
rs.Client = client
rs.EventClient = eventClient
rs.DefaultFeatureGates = initFeatureGates()
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -248,7 +211,7 @@ func TestTaintsUpdated(t *testing.T) {
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil)) client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
@@ -287,7 +250,6 @@ func TestDuplicate(t *testing.T) {
}
rs.Client = client
rs.EventClient = eventClient
rs.DefaultFeatureGates = initFeatureGates()
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -299,7 +261,7 @@ func TestDuplicate(t *testing.T) {
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil)) client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
@@ -327,7 +289,6 @@ func TestRootCancel(t *testing.T) {
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
rs.DefaultFeatureGates = initFeatureGates()
errChan := make(chan error, 1)
defer close(errChan)
@@ -363,7 +324,6 @@ func TestRootCancelWithNoInterval(t *testing.T) {
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 0
rs.DefaultFeatureGates = initFeatureGates()
errChan := make(chan error, 1)
defer close(errChan)
@@ -442,7 +402,7 @@ func TestValidateVersionCompatibility(t *testing.T) {
}
}
func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackground func(podName string) bool, evictionErr error) func(action core.Action) (bool, runtime.Object, error) { func podEvictionReactionTestingFnc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
@@ -450,14 +410,7 @@ func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackgroun
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
if isEvictionsInBackground != nil && isEvictionsInBackground(eviction.GetName()) {
return true, nil, tooManyRequestsError
}
if evictionErr != nil {
return true, nil, evictionErr
}
*evictedPods = append(*evictedPods, eviction.GetName()) *evictedPods = append(*evictedPods, eviction.GetName())
return true, nil, nil
} }
} }
return false, nil, nil // fallback to the default reactor return false, nil, nil // fallback to the default reactor
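As an aside, here is a minimal sketch (not part of this compare) of wiring the reactor above into a fake clientset, assuming the three-argument variant of podEvictionReactionTestingFnc shown on the left-hand side is in scope; demoEvictionReactor is a hypothetical name.

package descheduler

import (
	"context"
	"fmt"

	policy "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
)

// demoEvictionReactor shows the reactor recording evicted pod names instead of
// reaching a real API server.
func demoEvictionReactor(ctx context.Context) error {
	client := fakeclientset.NewSimpleClientset()
	var evictedPods []string
	// nil isEvictionsInBackground and nil evictionErr: every eviction succeeds
	// and the pod name is appended to evictedPods.
	client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))

	// Creating the pods/eviction subresource through the fake client is matched
	// by the "create"/"pods" reactor registered above.
	if err := client.PolicyV1().Evictions("dev").Evict(ctx, &policy.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "dev"},
	}); err != nil {
		return err
	}
	fmt.Println(evictedPods) // [p1]
	return nil
}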
@@ -493,15 +446,15 @@ func TestPodEvictorReset(t *testing.T) {
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy() internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx) ctxCancel, cancel := context.WithCancel(ctx)
rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2) rs, descheduler, client := initDescheduler(t, ctxCancel, internalDeschedulerPolicy, node1, node2, p1, p2)
defer cancel() defer cancel()
var evictedPods []string var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil)) client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods))
var fakeEvictedPods []string var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) { descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil) return podEvictionReactionTestingFnc(&fakeEvictedPods)
} }
// a single pod eviction expected // a single pod eviction expected
@@ -544,138 +497,6 @@ func TestPodEvictorReset(t *testing.T) {
} }
} }
func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
if total := descheduler.podEvictor.TotalEvictionRequests(); total != totalEvictionRequests {
t.Fatalf("Expected %v total eviction requests, got %v instead", totalEvictionRequests, total)
}
if total := descheduler.podEvictor.TotalEvicted(); total != totalEvicted {
t.Fatalf("Expected %v total evictions, got %v instead", totalEvicted, total)
}
t.Logf("Total evictions: %v, total eviction requests: %v, total evictions and eviction requests: %v", totalEvicted, totalEvictionRequests, totalEvicted+totalEvictionRequests)
}
func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
checkTotals(t, ctx, descheduler, totalEvictionRequests, totalEvicted)
}
func TestEvictionRequestsCache(t *testing.T) {
initPluginRegistry()
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2}
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
pod.Status.Phase = v1.PodRunning
}
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
updatePod(pod)
pod.Annotations = map[string]string{
evictions.EvictionRequestAnnotationKey: "",
}
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, updatePod)
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
ctxCancel, cancel := context.WithCancel(ctx)
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
defer cancel()
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
klog.Infof("2 evictions in background expected, 2 normal evictions")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
// No evicted pod is actually deleted on purpose so the test can run the descheduling cycle repeatedly
// without recreating the pods.
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Scenario: Eviction in background got initiated")
p2.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to update a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Scenario: Another eviction in background got initiated")
p1.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
if _, err := client.CoreV1().Pods(p1.Namespace).Update(context.TODO(), p1, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to update a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
klog.Infof("Scenario: Eviction in background completed")
if err := client.CoreV1().Pods(p1.Namespace).Delete(context.TODO(), p1.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions in background decreased")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 2)
klog.Infof("Scenario: A new pod without eviction in background added")
if _, err := client.CoreV1().Pods(p5.Namespace).Create(context.TODO(), p5, metav1.CreateOptions{}); err != nil {
t.Fatalf("unable to create a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions increased after running a descheduling cycle")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
klog.Infof("Scenario: Eviction in background canceled => eviction in progress annotation removed")
delete(p2.Annotations, evictions.EvictionInProgressAnnotationKey)
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to update a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions in background decreased")
checkTotals(t, ctx, descheduler, 0, 3)
klog.Infof("Scenario: Re-run the descheduling cycle to re-request eviction in background")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
klog.Infof("Scenario: Eviction in background completed with a pod in completed state")
p2.Status.Phase = v1.PodSucceeded
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
time.Sleep(100 * time.Millisecond)
klog.Infof("Check the number of evictions in background decreased")
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 0, 3)
}
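The scenarios above hinge on two annotations defined later in this compare in the evictions package. A rough, illustrative sketch of that contract follows; the description of the external controller's role is inferred from this test and the KubeVirt reference in the eviction code, not an API guarantee.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// annotatedForBackgroundEviction builds a pod that opts in to eviction in
// background: the descheduler only requests the eviction and an external
// component (for example a virtualization operator) performs the evacuation.
func annotatedForBackgroundEviction(name, namespace string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			Annotations: map[string]string{
				// evictions.EvictionRequestAnnotationKey
				"descheduler.alpha.kubernetes.io/request-evict-only": "",
			},
		},
	}
}

// The component handling the request signals progress by adding
// evictions.EvictionInProgressAnnotationKey; removing it again (as the test
// does for p2) is treated as a cancelled or restarted eviction.
func markEvictionInProgress(pod *v1.Pod) {
	pod.Annotations["descheduler.alpha.kubernetes.io/eviction-in-progress"] = ""
}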
func TestDeschedulingLimits(t *testing.T) { func TestDeschedulingLimits(t *testing.T) {
initPluginRegistry() initPluginRegistry()
@@ -719,13 +540,6 @@ func TestDeschedulingLimits(t *testing.T) {
pod.ObjectMeta.OwnerReferences = ownerRef1 pod.ObjectMeta.OwnerReferences = ownerRef1
} }
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
updatePod(pod)
pod.Annotations = map[string]string{
evictions.EvictionRequestAnnotationKey: "",
}
}
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) { t.Run(tc.description, func(t *testing.T) {
ctx := context.Background() ctx := context.Background()
@@ -733,59 +547,39 @@ func TestDeschedulingLimits(t *testing.T) {
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil) node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
nodes := []*v1.Node{node1, node2} nodes := []*v1.Node{node1, node2}
ctxCancel, cancel := context.WithCancel(ctx) ctxCancel, cancel := context.WithCancel(ctx)
featureGates := featuregate.NewFeatureGate() _, descheduler, client := initDescheduler(t, ctxCancel, tc.policy, node1, node2)
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
defer cancel() defer cancel()
var fakeEvictedPods []string
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
rand.Seed(time.Now().UnixNano())
pods := []*v1.Pod{ pods := []*v1.Pod{
test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground), test.BuildTestPod("p1", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground), test.BuildTestPod("p2", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p3", 100, 0, node1.Name, updatePod), test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p4", 100, 0, node1.Name, updatePod), test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
test.BuildTestPod("p5", 100, 0, node1.Name, updatePod), test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
} }
for i := 0; i < 10; i++ { for j := 0; j < 5; j++ {
rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] }) idx := j
func() { if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
for j := 0; j < 5; j++ { t.Fatalf("unable to create a pod: %v", err)
idx := j }
if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil { defer func() {
t.Fatalf("unable to create a pod: %v", err) if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
} t.Fatalf("unable to delete a pod: %v", err)
defer func() {
if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unable to delete a pod: %v", err)
}
}()
} }
time.Sleep(100 * time.Millisecond)
klog.Infof("2 evictions in background expected, 2 normal evictions")
err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
totalERs := descheduler.podEvictor.TotalEvictionRequests()
totalEs := descheduler.podEvictor.TotalEvicted()
if totalERs+totalEs > tc.limit {
t.Fatalf("Expected %v evictions and eviction requests in total, got %v instead", tc.limit, totalERs+totalEs)
}
t.Logf("Total evictions and eviction requests: %v (er=%v, e=%v)", totalERs+totalEs, totalERs, totalEs)
}() }()
} }
time.Sleep(100 * time.Millisecond)
err := descheduler.runDeschedulerLoop(ctx, nodes)
if err != nil {
t.Fatalf("Unable to run a descheduling loop: %v", err)
}
totalEs := descheduler.podEvictor.TotalEvicted()
if totalEs > tc.limit {
t.Fatalf("Expected %v evictions in total, got %v instead", tc.limit, totalEs)
}
t.Logf("Total evictions: %v", totalEs)
}) })
} }
} }
@@ -809,6 +603,24 @@ func TestLoadAwareDescheduling(t *testing.T) {
p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod) p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod)
p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod) p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod)
ctxCancel, cancel := context.WithCancel(ctx)
_, descheduler, client := initDescheduler(
t,
ctxCancel,
lowNodeUtilizationPolicy(
api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
true, // enabled metrics utilization
),
node1, node2, p1, p2, p3, p4, p5)
defer cancel()
nodemetricses := []*v1beta1.NodeMetrics{ nodemetricses := []*v1beta1.NodeMetrics{
test.BuildNodeMetrics("n1", 2400, 3000), test.BuildNodeMetrics("n1", 2400, 3000),
test.BuildNodeMetrics("n2", 400, 0), test.BuildNodeMetrics("n2", 400, 0),
@@ -822,39 +634,16 @@ func TestLoadAwareDescheduling(t *testing.T) {
test.BuildPodMetrics("p5", 400, 0), test.BuildPodMetrics("p5", 400, 0),
} }
metricsClientset := fakemetricsclient.NewSimpleClientset() var metricsObjs []runtime.Object
for _, nodemetrics := range nodemetricses { for _, nodemetrics := range nodemetricses {
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "") metricsObjs = append(metricsObjs, nodemetrics)
} }
for _, podmetrics := range podmetricses { for _, podmetrics := range podmetricses {
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace) metricsObjs = append(metricsObjs, podmetrics)
} }
policy := lowNodeUtilizationPolicy( metricsClientset := fakemetricsclient.NewSimpleClientset(metricsObjs...)
api.ResourceThresholds{ descheduler.metricsCollector = metricscollector.NewMetricsCollector(client, metricsClientset)
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
true, // enabled metrics utilization
)
policy.MetricsProviders = []api.MetricsProvider{{Source: api.KubernetesMetrics}}
ctxCancel, cancel := context.WithCancel(ctx)
_, descheduler, _ := initDescheduler(
t,
ctxCancel,
initFeatureGates(),
policy,
metricsClientset,
node1, node2, p1, p2, p3, p4, p5)
defer cancel()
// This needs to be run since the metrics collector is started
// after newDescheduler in RunDeschedulerStrategies.
descheduler.metricsCollector.Collect(ctx) descheduler.metricsCollector.Collect(ctx)
err := descheduler.runDeschedulerLoop(ctx, nodes) err := descheduler.runDeschedulerLoop(ctx, nodes)
@@ -862,7 +651,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
t.Fatalf("Unable to run a descheduling loop: %v", err) t.Fatalf("Unable to run a descheduling loop: %v", err)
} }
totalEs := descheduler.podEvictor.TotalEvicted() totalEs := descheduler.podEvictor.TotalEvicted()
if totalEs != 2 { if totalEs != 2 {
t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs) t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs)
} }
t.Logf("Total evictions: %v", totalEs) t.Logf("Total evictions: %v", totalEs)
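For reference, a small sketch of seeding a fake metrics clientset the way this test does, with explicit GroupVersionResources standing in for the nodesgvr/podsgvr helpers the test assumes; function and variable names here are illustrative.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/metrics/pkg/apis/metrics/v1beta1"
	fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
)

func seedFakeMetrics() *fakemetricsclient.Clientset {
	nodesGVR := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}

	metricsClientset := fakemetricsclient.NewSimpleClientset()
	nodeMetrics := &v1beta1.NodeMetrics{
		ObjectMeta: metav1.ObjectMeta{Name: "n1"},
		Usage: v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(2400, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(3000*1024*1024, resource.BinarySI),
		},
	}
	// NodeMetrics is cluster-scoped, hence the empty namespace; PodMetrics
	// would be created the same way with the pod's namespace instead.
	_ = metricsClientset.Tracker().Create(nodesGVR, nodeMetrics, "")
	return metricsClientset
}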


@@ -19,9 +19,7 @@ package evictions
import ( import (
"context" "context"
"fmt" "fmt"
"strings"
"sync" "sync"
"time"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
@@ -29,176 +27,15 @@ import (
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/metrics" "sigs.k8s.io/descheduler/metrics"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils" eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/tracing" "sigs.k8s.io/descheduler/pkg/tracing"
) )
var (
assumedEvictionRequestTimeoutSeconds uint = 10 * 60 // 10 minutes
evictionRequestsCacheResyncPeriod time.Duration = 10 * time.Minute
// syncedPollPeriod controls how often you look at the status of your sync funcs
syncedPollPeriod = 100 * time.Millisecond
)
type evictionRequestItem struct {
podName, podNamespace, podNodeName string
evictionAssumed bool
assumedTimestamp metav1.Time
}
type evictionRequestsCache struct {
mu sync.RWMutex
requests map[string]evictionRequestItem
requestsPerNode map[string]uint
requestsPerNamespace map[string]uint
requestsTotal uint
assumedRequestTimeoutSeconds uint
}
func newEvictionRequestsCache(assumedRequestTimeoutSeconds uint) *evictionRequestsCache {
return &evictionRequestsCache{
requests: make(map[string]evictionRequestItem),
requestsPerNode: make(map[string]uint),
requestsPerNamespace: make(map[string]uint),
assumedRequestTimeoutSeconds: assumedRequestTimeoutSeconds,
}
}
func (erc *evictionRequestsCache) run(ctx context.Context) {
wait.UntilWithContext(ctx, erc.cleanCache, evictionRequestsCacheResyncPeriod)
}
// cleanCache removes all assumed entries that have not been confirmed
// for more than a specified timeout
func (erc *evictionRequestsCache) cleanCache(ctx context.Context) {
erc.mu.Lock()
defer erc.mu.Unlock()
klog.V(4).Infof("Cleaning cache of assumed eviction requests in background")
for uid, item := range erc.requests {
if item.evictionAssumed {
requestAgeSeconds := uint(metav1.Now().Sub(item.assumedTimestamp.Local()).Seconds())
if requestAgeSeconds > erc.assumedRequestTimeoutSeconds {
klog.V(4).InfoS("Assumed eviction request in background timed out, deleting", "timeout", erc.assumedRequestTimeoutSeconds, "podNamespace", item.podNamespace, "podName", item.podName)
erc.deleteItem(uid)
}
}
}
}
func (erc *evictionRequestsCache) evictionRequestsPerNode(nodeName string) uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return erc.requestsPerNode[nodeName]
}
func (erc *evictionRequestsCache) evictionRequestsPerNamespace(ns string) uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return erc.requestsPerNamespace[ns]
}
func (erc *evictionRequestsCache) evictionRequestsTotal() uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return erc.requestsTotal
}
func (erc *evictionRequestsCache) TotalEvictionRequests() uint {
erc.mu.RLock()
defer erc.mu.RUnlock()
return uint(len(erc.requests))
}
// getPodKey returns the string key of a pod.
func getPodKey(pod *v1.Pod) string {
uid := string(pod.UID)
// Every pod is expected to have the UID set.
// When the descheduling framework is used for simulation
// user created workload may forget to set the UID.
if len(uid) == 0 {
panic(fmt.Errorf("cannot get cache key for %v/%v pod with empty UID", pod.Namespace, pod.Name))
}
return uid
}
func (erc *evictionRequestsCache) addPod(pod *v1.Pod) {
erc.mu.Lock()
defer erc.mu.Unlock()
uid := getPodKey(pod)
if _, exists := erc.requests[uid]; exists {
return
}
erc.requests[uid] = evictionRequestItem{podNamespace: pod.Namespace, podName: pod.Name, podNodeName: pod.Spec.NodeName}
erc.requestsPerNode[pod.Spec.NodeName]++
erc.requestsPerNamespace[pod.Namespace]++
erc.requestsTotal++
}
func (erc *evictionRequestsCache) assumePod(pod *v1.Pod) {
erc.mu.Lock()
defer erc.mu.Unlock()
uid := getPodKey(pod)
if _, exists := erc.requests[uid]; exists {
return
}
erc.requests[uid] = evictionRequestItem{
podNamespace: pod.Namespace,
podName: pod.Name,
podNodeName: pod.Spec.NodeName,
evictionAssumed: true,
assumedTimestamp: metav1.NewTime(time.Now()),
}
erc.requestsPerNode[pod.Spec.NodeName]++
erc.requestsPerNamespace[pod.Namespace]++
erc.requestsTotal++
}
// no locking, expected to be invoked from protected methods only
func (erc *evictionRequestsCache) deleteItem(uid string) {
erc.requestsPerNode[erc.requests[uid].podNodeName]--
if erc.requestsPerNode[erc.requests[uid].podNodeName] == 0 {
delete(erc.requestsPerNode, erc.requests[uid].podNodeName)
}
erc.requestsPerNamespace[erc.requests[uid].podNamespace]--
if erc.requestsPerNamespace[erc.requests[uid].podNamespace] == 0 {
delete(erc.requestsPerNamespace, erc.requests[uid].podNamespace)
}
erc.requestsTotal--
delete(erc.requests, uid)
}
func (erc *evictionRequestsCache) deletePod(pod *v1.Pod) {
erc.mu.Lock()
defer erc.mu.Unlock()
uid := getPodKey(pod)
if _, exists := erc.requests[uid]; exists {
erc.deleteItem(uid)
}
}
func (erc *evictionRequestsCache) hasPod(pod *v1.Pod) bool {
erc.mu.RLock()
defer erc.mu.RUnlock()
uid := getPodKey(pod)
_, exists := erc.requests[uid]
return exists
}
var (
EvictionRequestAnnotationKey = "descheduler.alpha.kubernetes.io/request-evict-only"
EvictionInProgressAnnotationKey = "descheduler.alpha.kubernetes.io/eviction-in-progress"
EvictionInBackgroundErrorText = "Eviction triggered evacuation"
)
// nodePodEvictedCount keeps count of pods evicted on node // nodePodEvictedCount keeps count of pods evicted on node
type ( type (
nodePodEvictedCount map[string]uint nodePodEvictedCount map[string]uint
@@ -206,186 +43,54 @@ type (
) )
type PodEvictor struct { type PodEvictor struct {
mu sync.RWMutex mu sync.Mutex
client clientset.Interface client clientset.Interface
policyGroupVersion string policyGroupVersion string
dryRun bool dryRun bool
evictionFailureEventNotification bool maxPodsToEvictPerNode *uint
maxPodsToEvictPerNode *uint maxPodsToEvictPerNamespace *uint
maxPodsToEvictPerNamespace *uint maxPodsToEvictTotal *uint
maxPodsToEvictTotal *uint nodePodCount nodePodEvictedCount
gracePeriodSeconds *int64 namespacePodCount namespacePodEvictCount
nodePodCount nodePodEvictedCount totalPodCount uint
namespacePodCount namespacePodEvictCount metricsEnabled bool
totalPodCount uint eventRecorder events.EventRecorder
metricsEnabled bool
eventRecorder events.EventRecorder
erCache *evictionRequestsCache
featureGates featuregate.FeatureGate
// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
registeredHandlers []cache.ResourceEventHandlerRegistration
} }
func NewPodEvictor( func NewPodEvictor(
ctx context.Context,
client clientset.Interface, client clientset.Interface,
eventRecorder events.EventRecorder, eventRecorder events.EventRecorder,
podInformer cache.SharedIndexInformer,
featureGates featuregate.FeatureGate,
options *Options, options *Options,
) (*PodEvictor, error) { ) *PodEvictor {
if options == nil { if options == nil {
options = NewOptions() options = NewOptions()
} }
podEvictor := &PodEvictor{ return &PodEvictor{
client: client, client: client,
eventRecorder: eventRecorder, eventRecorder: eventRecorder,
policyGroupVersion: options.policyGroupVersion, policyGroupVersion: options.policyGroupVersion,
dryRun: options.dryRun, dryRun: options.dryRun,
evictionFailureEventNotification: options.evictionFailureEventNotification, maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
maxPodsToEvictPerNode: options.maxPodsToEvictPerNode, maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace, maxPodsToEvictTotal: options.maxPodsToEvictTotal,
maxPodsToEvictTotal: options.maxPodsToEvictTotal, metricsEnabled: options.metricsEnabled,
gracePeriodSeconds: options.gracePeriodSeconds, nodePodCount: make(nodePodEvictedCount),
metricsEnabled: options.metricsEnabled, namespacePodCount: make(namespacePodEvictCount),
nodePodCount: make(nodePodEvictedCount),
namespacePodCount: make(namespacePodEvictCount),
featureGates: featureGates,
} }
if featureGates.Enabled(features.EvictionsInBackground) {
erCache := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)
handlerRegistration, err := podInformer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
pod, ok := obj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
return
}
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
if _, exists := pod.Annotations[EvictionInProgressAnnotationKey]; exists {
// Ignore completed/succeeded or failed pods
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
klog.V(3).InfoS("Eviction in background detected. Adding pod to the cache.", "pod", klog.KObj(pod))
erCache.addPod(pod)
}
}
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
oldPod, ok := oldObj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
return
}
newPod, ok := newObj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
return
}
// Ignore pod's that are not subject to an eviction in background
if _, exists := newPod.Annotations[EvictionRequestAnnotationKey]; !exists {
if erCache.hasPod(newPod) {
klog.V(3).InfoS("Pod with eviction in background lost annotation. Removing pod from the cache.", "pod", klog.KObj(newPod))
}
erCache.deletePod(newPod)
return
}
// Remove completed/succeeded or failed pods from the cache
if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
klog.V(3).InfoS("Pod with eviction in background completed. Removing pod from the cache.", "pod", klog.KObj(newPod))
erCache.deletePod(newPod)
return
}
// Ignore any pod that does not have eviction in progress
if _, exists := newPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
// In case EvictionInProgressAnnotationKey annotation is not present/removed
// it's unclear whether the eviction was restarted or terminated.
// If the eviction gets restarted the pod needs to be removed from the cache
// to allow re-triggering the eviction.
if _, exists := oldPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
return
}
// the annotation was removed -> remove the pod from the cache to allow to
// request for eviction again. In case the eviction got restarted requesting
// the eviction again is expected to be a no-op. In case the eviction
// got terminated with no-retry, requesting a new eviction is a normal
// operation.
klog.V(3).InfoS("Eviction in background canceled (annotation removed). Removing pod from the cache.", "annotation", EvictionInProgressAnnotationKey, "pod", klog.KObj(newPod))
erCache.deletePod(newPod)
return
}
// Pick up the eviction in progress
if !erCache.hasPod(newPod) {
klog.V(3).InfoS("Eviction in background detected. Updating the cache.", "pod", klog.KObj(newPod))
}
erCache.addPod(newPod)
},
DeleteFunc: func(obj interface{}) {
var pod *v1.Pod
switch t := obj.(type) {
case *v1.Pod:
pod = t
case cache.DeletedFinalStateUnknown:
var ok bool
pod, ok = t.Obj.(*v1.Pod)
if !ok {
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
return
}
default:
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
return
}
if erCache.hasPod(pod) {
klog.V(3).InfoS("Pod with eviction in background deleted/evicted. Removing pod from the cache.", "pod", klog.KObj(pod))
}
erCache.deletePod(pod)
},
},
)
if err != nil {
return nil, fmt.Errorf("unable to register event handler for pod evictor: %v", err)
}
podEvictor.registeredHandlers = append(podEvictor.registeredHandlers, handlerRegistration)
go erCache.run(ctx)
podEvictor.erCache = erCache
}
return podEvictor, nil
}
// WaitForEventHandlersSync waits for EventHandlers to sync.
// It returns true if it was successful, false if the controller should shut down
func (pe *PodEvictor) WaitForEventHandlersSync(ctx context.Context) error {
return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
for _, handler := range pe.registeredHandlers {
if !handler.HasSynced() {
return false, nil
}
}
return true, nil
})
} }
// NodeEvicted gives a number of pods evicted for node // NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint { func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
pe.mu.RLock() pe.mu.Lock()
defer pe.mu.RUnlock() defer pe.mu.Unlock()
return pe.nodePodCount[node.Name] return pe.nodePodCount[node.Name]
} }
// TotalEvicted gives a number of pods evicted through all nodes // TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() uint { func (pe *PodEvictor) TotalEvicted() uint {
pe.mu.RLock() pe.mu.Lock()
defer pe.mu.RUnlock() defer pe.mu.Unlock()
return pe.totalPodCount return pe.totalPodCount
} }
@@ -403,46 +108,6 @@ func (pe *PodEvictor) SetClient(client clientset.Interface) {
pe.client = client pe.client = client
} }
func (pe *PodEvictor) evictionRequestsTotal() uint {
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.evictionRequestsTotal()
} else {
return 0
}
}
func (pe *PodEvictor) evictionRequestsPerNode(node string) uint {
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.evictionRequestsPerNode(node)
} else {
return 0
}
}
func (pe *PodEvictor) evictionRequestsPerNamespace(ns string) uint {
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.evictionRequestsPerNamespace(ns)
} else {
return 0
}
}
func (pe *PodEvictor) EvictionRequests(node *v1.Node) uint {
pe.mu.RLock()
defer pe.mu.RUnlock()
return pe.evictionRequestsTotal()
}
func (pe *PodEvictor) TotalEvictionRequests() uint {
pe.mu.RLock()
defer pe.mu.RUnlock()
if pe.featureGates.Enabled(features.EvictionsInBackground) {
return pe.erCache.TotalEvictionRequests()
} else {
return 0
}
}
// EvictOptions provides a handle for passing additional info to EvictPod // EvictOptions provides a handle for passing additional info to EvictPod
type EvictOptions struct { type EvictOptions struct {
// Reason allows for passing details about the specific eviction for logging. // Reason allows for passing details about the specific eviction for logging.
@@ -456,70 +121,45 @@ type EvictOptions struct {
// EvictPod evicts a pod while exercising eviction limits. // EvictPod evicts a pod while exercising eviction limits.
// Returns true when the pod is evicted on the server side. // Returns true when the pod is evicted on the server side.
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error { func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
if len(pod.UID) == 0 {
klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
}
if pe.featureGates.Enabled(features.EvictionsInBackground) {
// eviction in background requested
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
if pe.erCache.hasPod(pod) {
klog.V(3).InfoS("Eviction in background already requested (ignoring)", "pod", klog.KObj(pod))
return nil
}
}
}
pe.mu.Lock() pe.mu.Lock()
defer pe.mu.Unlock() defer pe.mu.Unlock()
var span trace.Span var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation))) ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
defer span.End() defer span.End()
if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+pe.evictionRequestsTotal()+1 > *pe.maxPodsToEvictTotal { if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+1 > *pe.maxPodsToEvictTotal {
err := NewEvictionTotalLimitError() err := NewEvictionTotalLimitError()
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
} }
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error()))) span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal) klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictTotal)
}
return err return err
} }
if pod.Spec.NodeName != "" { if pod.Spec.NodeName != "" {
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+pe.evictionRequestsPerNode(pod.Spec.NodeName)+1 > *pe.maxPodsToEvictPerNode { if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
err := NewEvictionNodeLimitError(pod.Spec.NodeName) err := NewEvictionNodeLimitError(pod.Spec.NodeName)
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
} }
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error()))) span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName) klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNode)
}
return err return err
} }
} }
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+pe.evictionRequestsPerNamespace(pod.Namespace)+1 > *pe.maxPodsToEvictPerNamespace { if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
err := NewEvictionNamespaceLimitError(pod.Namespace) err := NewEvictionNamespaceLimitError(pod.Namespace)
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
} }
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error()))) span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod)) klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNamespace)
}
return err return err
} }
ignore, err := pe.evictPod(ctx, pod) err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
if err != nil { if err != nil {
// err is used only for logging purposes // err is used only for logging purposes
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error()))) span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
@@ -527,16 +167,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
if pe.metricsEnabled { if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc() metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
} }
if pe.evictionFailureEventNotification {
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
}
return err return err
} }
if ignore {
return nil
}
if pod.Spec.NodeName != "" { if pod.Spec.NodeName != "" {
pe.nodePodCount[pod.Spec.NodeName]++ pe.nodePodCount[pod.Spec.NodeName]++
} }
@@ -558,20 +191,17 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio
reason = "NotSet" reason = "NotSet"
} }
} }
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName) pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
} }
return nil return nil
} }
// return (ignore, err) func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) { deleteOptions := &metav1.DeleteOptions{}
deleteOptions := &metav1.DeleteOptions{
GracePeriodSeconds: pe.gracePeriodSeconds,
}
// GracePeriodSeconds ? // GracePeriodSeconds ?
eviction := &policy.Eviction{ eviction := &policy.Eviction{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
APIVersion: pe.policyGroupVersion, APIVersion: policyGroupVersion,
Kind: eutils.EvictionKind, Kind: eutils.EvictionKind,
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -580,36 +210,13 @@ func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
}, },
DeleteOptions: deleteOptions, DeleteOptions: deleteOptions,
} }
err := pe.client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction) err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
if err == nil {
return false, nil
}
if pe.featureGates.Enabled(features.EvictionsInBackground) {
// eviction in background requested
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
// Simulating https://github.com/kubevirt/kubevirt/pull/11532/files#diff-059cc1fc09e8b469143348cc3aa80b40de987670e008fa18a6fe010061f973c9R77
if apierrors.IsTooManyRequests(err) && strings.Contains(err.Error(), EvictionInBackgroundErrorText) {
// Ignore eviction of any pod that's failed or completed.
// It can happen an eviction in background ends up with the pod stuck in the completed state.
// Normally, any request eviction is expected to end with the pod deletion.
// However, some custom eviction policies may end up with completed pods around.
// Which leads to all the completed pods to be considered still as unfinished evictions in background.
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
klog.V(3).InfoS("Ignoring eviction of a completed/failed pod", "pod", klog.KObj(pod))
return true, nil
}
klog.V(3).InfoS("Eviction in background assumed", "pod", klog.KObj(pod))
pe.erCache.assumePod(pod)
return true, nil
}
}
}
if apierrors.IsTooManyRequests(err) { if apierrors.IsTooManyRequests(err) {
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err) return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
} }
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err) return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
} }
return false, err return err
} }
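For context, a compact sketch of the special case the left-hand evictPod handles: a 429 whose message carries the evacuation marker is treated as an accepted background eviction rather than a failure. The helper name is illustrative; the annotation key and error text are the package variables shown earlier in this file.

package evictions

import (
	"strings"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// isAssumedBackgroundEviction reports whether an eviction error should be
// interpreted as "request accepted, evacuation happening in background".
func isAssumedBackgroundEviction(pod *v1.Pod, err error) bool {
	if err == nil {
		return false
	}
	// Only pods that explicitly opted in via the request-evict-only annotation
	// are eligible.
	if _, requested := pod.Annotations[EvictionRequestAnnotationKey]; !requested {
		return false
	}
	// Completed or failed pods are not counted as pending eviction requests.
	if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
		return false
	}
	return apierrors.IsTooManyRequests(err) &&
		strings.Contains(err.Error(), EvictionInBackgroundErrorText)
}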


@@ -18,107 +18,57 @@ package evictions
import ( import (
"context" "context"
"fmt"
"reflect"
"testing" "testing"
"time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/events" "k8s.io/client-go/tools/events"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr" utilptr "k8s.io/utils/ptr"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
const (
notFoundText = "pod not found when evicting \"%s\": pods \"%s\" not found"
tooManyRequests = "error when evicting pod (ignoring) \"%s\": Too many requests: too many requests"
)
func initFeatureGates() featuregate.FeatureGate {
featureGates := featuregate.NewFeatureGate()
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
})
return featureGates
}
func TestEvictPod(t *testing.T) { func TestEvictPod(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil) node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil) pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
tests := []struct { tests := []struct {
description string description string
node *v1.Node node *v1.Node
evictedPod *v1.Pod pod *v1.Pod
pods []runtime.Object pods []v1.Pod
wantErr error want error
}{ }{
{ {
description: "test pod eviction - pod present", description: "test pod eviction - pod present",
node: node1, node: node1,
evictedPod: pod1, pod: pod1,
pods: []runtime.Object{pod1}, pods: []v1.Pod{*pod1},
want: nil,
}, },
{ {
description: "test pod eviction - pod absent (not found error)", description: "test pod eviction - pod absent",
node: node1, node: node1,
evictedPod: pod1, pod: pod1,
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)}, pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
wantErr: fmt.Errorf(notFoundText, pod1.Name, pod1.Name), want: nil,
},
{
description: "test pod eviction - pod absent (too many requests error)",
node: node1,
evictedPod: pod1,
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
wantErr: fmt.Errorf(tooManyRequests, pod1.Name),
}, },
} }
for _, test := range tests { for _, test := range tests {
t.Run(test.description, func(t *testing.T) { fakeClient := &fake.Clientset{}
ctx := context.Background() fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
fakeClient := fake.NewClientset(test.pods...) return true, &v1.PodList{Items: test.pods}, nil
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, test.wantErr
})
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
eventRecorder := &events.FakeRecorder{}
podEvictor, err := NewPodEvictor(
ctx,
fakeClient,
eventRecorder,
sharedInformerFactory.Core().V1().Pods().Informer(),
initFeatureGates(),
NewOptions(),
)
if err != nil {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}
_, got := podEvictor.evictPod(ctx, test.evictedPod)
if got != test.wantErr {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
}
}) })
got := evictPod(ctx, fakeClient, test.pod, "v1")
if got != test.want {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
}
} }
} }
@@ -168,317 +118,50 @@ func TestPodTypes(t *testing.T) {
} }
func TestNewPodEvictor(t *testing.T) { func TestNewPodEvictor(t *testing.T) {
ctx := context.Background()
pod1 := test.BuildTestPod("pod", 400, 0, "node", nil) pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
type podEvictorTest struct {
description string
pod *v1.Pod
dryRun bool
evictionFailureEventNotification *bool
maxPodsToEvictTotal *uint
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
expectedNodeEvictions uint
expectedTotalEvictions uint
expectedError error
// expectedEvent is a slice of strings representing expected events.
// Each string in the slice should follow the format: "EventType Reason Message".
// - "Warning Failed processing failed"
events []string
}
tests := []podEvictorTest{
{
description: "one eviction expected with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
},
{
description: "eviction limit exceeded on total with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](0),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionTotalLimitError(),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (0)"},
},
{
description: "eviction limit exceeded on node with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](0),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNodeLimitError("node"),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (0)"},
},
{
description: "eviction limit exceeded on node with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNamespaceLimitError("default"),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (0)"},
},
{
description: "eviction error with eviction failure event notification",
pod: pod1,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: fmt.Errorf("eviction error"),
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: eviction error"},
},
{
description: "eviction with dryRun with eviction failure event notification",
pod: pod1,
dryRun: true,
evictionFailureEventNotification: utilptr.To[bool](true),
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
},
{
description: "one eviction expected without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
},
{
description: "eviction limit exceeded on total without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](0),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionTotalLimitError(),
},
{
description: "eviction limit exceeded on node without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](0),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNodeLimitError("node"),
},
{
description: "eviction limit exceeded on node without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: NewEvictionNamespaceLimitError("default"),
},
{
description: "eviction error without eviction failure event notification",
pod: pod1,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 0,
expectedTotalEvictions: 0,
expectedError: fmt.Errorf("eviction error"),
},
{
description: "eviction without dryRun with eviction failure event notification",
pod: pod1,
dryRun: true,
maxPodsToEvictTotal: utilptr.To[uint](1),
maxPodsToEvictPerNode: utilptr.To[uint](1),
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
expectedNodeEvictions: 1,
expectedTotalEvictions: 1,
expectedError: nil,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
fakeClient := fake.NewSimpleClientset(pod1)
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, test.expectedError
})
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) fakeClient := fake.NewSimpleClientset(pod1)
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
eventRecorder := events.NewFakeRecorder(100) eventRecorder := &events.FakeRecorder{}
podEvictor, err := NewPodEvictor( podEvictor := NewPodEvictor(
ctx, fakeClient,
fakeClient,
eventRecorder,
sharedInformerFactory.Core().V1().Pods().Informer(),
initFeatureGates(),
NewOptions().
WithDryRun(test.dryRun).
WithMaxPodsToEvictTotal(test.maxPodsToEvictTotal).
WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
WithEvictionFailureEventNotification(test.evictionFailureEventNotification).
WithMaxPodsToEvictPerNamespace(test.maxPodsToEvictPerNamespace),
)
if err != nil {
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
}
stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
if actualErr := podEvictor.EvictPod(ctx, test.pod, EvictOptions{}); actualErr != nil && actualErr.Error() != test.expectedError.Error() {
t.Errorf("Expected error: %v, got: %v", test.expectedError, actualErr)
}
if evictions := podEvictor.NodeEvicted(stubNode); evictions != test.expectedNodeEvictions {
t.Errorf("Expected %d node evictions, got %d instead", test.expectedNodeEvictions, evictions)
}
if evictions := podEvictor.TotalEvicted(); evictions != test.expectedTotalEvictions {
t.Errorf("Expected %d total evictions, got %d instead", test.expectedTotalEvictions, evictions)
}
// Assert that the events are correct.
assertEqualEvents(t, test.events, eventRecorder.Events)
})
}
}
func TestEvictionRequestsCacheCleanup(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
ownerRef1 := test.GetReplicaSetOwnerRefList()
updatePod := func(pod *v1.Pod) {
pod.Namespace = "dev"
pod.ObjectMeta.OwnerReferences = ownerRef1
}
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
updatePod(pod)
pod.Annotations = map[string]string{
EvictionRequestAnnotationKey: "",
}
}
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
client := fakeclientset.NewSimpleClientset(node1, p1, p2, p3, p4)
sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
_, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
podEvictor, err := NewPodEvictor(
ctx,
client,
eventRecorder, eventRecorder,
sharedInformerFactory.Core().V1().Pods().Informer(), NewOptions().WithMaxPodsToEvictPerNode(utilptr.To[uint](1)),
initFeatureGates(),
nil,
	)
	if err != nil {
		t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
	}

	client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() == "eviction" {
			createAct, matched := action.(core.CreateActionImpl)
			if !matched {
				return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
			}
			if eviction, matched := createAct.Object.(*policy.Eviction); matched {
				podName := eviction.GetName()
				if podName == "p1" || podName == "p2" {
					return true, nil, &apierrors.StatusError{
						ErrStatus: metav1.Status{
							Reason:  metav1.StatusReasonTooManyRequests,
							Message: "Eviction triggered evacuation",
						},
					}
				}
				return true, nil, nil
			}
		}
		return false, nil, nil
	})

	sharedInformerFactory.Start(ctx.Done())
	sharedInformerFactory.WaitForCacheSync(ctx.Done())

	podEvictor.EvictPod(ctx, p1, EvictOptions{})
	podEvictor.EvictPod(ctx, p2, EvictOptions{})
	podEvictor.EvictPod(ctx, p3, EvictOptions{})
	podEvictor.EvictPod(ctx, p4, EvictOptions{})

	klog.Infof("2 evictions in background expected, 2 normal evictions")
	if total := podEvictor.TotalEvictionRequests(); total != 2 {
		t.Fatalf("Expected %v total eviction requests, got %v instead", 2, total)
	}
	if total := podEvictor.TotalEvicted(); total != 2 {
		t.Fatalf("Expected %v total evictions, got %v instead", 2, total)
	}

	klog.Infof("2 evictions in background assumed. Wait for a few seconds and check the assumed requests timed out")
	time.Sleep(2 * time.Second)
	klog.Infof("Checking the assumed requests timed out and were deleted")
	// Set the timeout to 1s so the cleaning can be tested
	podEvictor.erCache.assumedRequestTimeoutSeconds = 1
	podEvictor.erCache.cleanCache(ctx)
	if totalERs := podEvictor.TotalEvictionRequests(); totalERs > 0 {
		t.Fatalf("Expected 0 eviction requests, got %v instead", totalERs)
	}
}

	// The counterpart side of this hunk exercises the per-node and total limits instead:
	stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
	// 0 evictions expected
	if evictions := podEvictor.NodeEvicted(stubNode); evictions != 0 {
		t.Errorf("Expected 0 node evictions, got %q instead", evictions)
	}
	// 0 evictions expected
	if evictions := podEvictor.TotalEvicted(); evictions != 0 {
		t.Errorf("Expected 0 total evictions, got %q instead", evictions)
	}

	if err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{}); err != nil {
		t.Errorf("Expected a pod eviction, got an eviction error instead: %v", err)
	}

	// 1 node eviction expected
	if evictions := podEvictor.NodeEvicted(stubNode); evictions != 1 {
		t.Errorf("Expected 1 node eviction, got %q instead", evictions)
	}
	// 1 total eviction expected
	if evictions := podEvictor.TotalEvicted(); evictions != 1 {
		t.Errorf("Expected 1 total evictions, got %q instead", evictions)
	}

	err := podEvictor.EvictPod(context.TODO(), pod1, EvictOptions{})
	if err == nil {
		t.Errorf("Expected a pod eviction error, got nil instead")
	}
	switch err.(type) {
	case *EvictionNodeLimitError:
		// all good
	default:
		t.Errorf("Expected a pod eviction EvictionNodeLimitError error, got a different error instead: %v", err)
	}
}

func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) {
	t.Logf("Assert for events: %v", expected)
	c := time.After(wait.ForeverTestTimeout)
	for _, e := range expected {
		select {
		case a := <-actual:
			if !reflect.DeepEqual(a, e) {
				t.Errorf("Expected event %q, got %q instead", e, a)
			}
		case <-c:
			t.Errorf("Expected event %q, got nothing", e)
			// continue iterating to print all expected events
		}
	}
	for {
		select {
		case a := <-actual:
			t.Errorf("Unexpected event: %q", a)
		default:
			return // No more events, as expected.
		}
	}
}

View File

@@ -5,14 +5,12 @@ import (
)

type Options struct {
	policyGroupVersion               string
	dryRun                           bool
	maxPodsToEvictPerNode            *uint
	maxPodsToEvictPerNamespace       *uint
	maxPodsToEvictTotal              *uint
	evictionFailureEventNotification bool
	metricsEnabled                   bool
	gracePeriodSeconds               *int64
}

// NewOptions returns an Options with default values.
@@ -47,19 +45,7 @@ func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
	return o
}

func (o *Options) WithGracePeriodSeconds(gracePeriodSeconds *int64) *Options {
	o.gracePeriodSeconds = gracePeriodSeconds
	return o
}

func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
	o.metricsEnabled = metricsEnabled
	return o
}
func (o *Options) WithEvictionFailureEventNotification(evictionFailureEventNotification *bool) *Options {
if evictionFailureEventNotification != nil {
o.evictionFailureEventNotification = *evictionFailureEventNotification
}
return o
}
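
Taken together these setters form a small builder. A minimal sketch of chaining them when preparing an evictor, assuming the package's usual import path; the limits used here are illustrative, not defaults:

```go
package main

import (
	"fmt"

	utilptr "k8s.io/utils/ptr"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
)

func main() {
	// Cap evictions per node and in total, and turn on eviction metrics.
	opts := evictions.NewOptions().
		WithMaxPodsToEvictPerNode(utilptr.To[uint](1)).
		WithMaxPodsToEvictTotal(utilptr.To[uint](10)).
		WithMetricsEnabled(true)
	fmt.Printf("%T\n", opts)
}
```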

View File

@@ -23,16 +23,15 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/klog/v2"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
listercorev1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
utilptr "k8s.io/utils/ptr" utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
) )
const ( const (
@@ -40,23 +39,19 @@ const (
) )
type MetricsCollector struct { type MetricsCollector struct {
nodeLister listercorev1.NodeLister clientset kubernetes.Interface
metricsClientset metricsclient.Interface metricsClientset metricsclient.Interface
nodeSelector labels.Selector
nodes map[string]api.ReferencedResourceList nodes map[string]map[v1.ResourceName]*resource.Quantity
mu sync.RWMutex mu sync.Mutex
// hasSynced signals at least one sync succeeded
hasSynced bool
} }
func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset metricsclient.Interface, nodeSelector labels.Selector) *MetricsCollector { func NewMetricsCollector(clientset kubernetes.Interface, metricsClientset metricsclient.Interface) *MetricsCollector {
return &MetricsCollector{ return &MetricsCollector{
nodeLister: nodeLister, clientset: clientset,
metricsClientset: metricsClientset, metricsClientset: metricsClientset,
nodeSelector: nodeSelector, nodes: make(map[string]map[v1.ResourceName]*resource.Quantity),
nodes: make(map[string]api.ReferencedResourceList),
} }
} }
@@ -66,51 +61,24 @@ func (mc *MetricsCollector) Run(ctx context.Context) {
}, 5*time.Second, ctx.Done()) }, 5*time.Second, ctx.Done())
} }
// During experiments, a rounding-to-int error causes weightedAverage to never
// reach the target value even when weightedAverage is applied many times in a
// row. The difference between the target and the computed average stays within
// 5 units. Nevertheless, the value is expected to change over time, so the
// weighted average never gets a chance to converge, which makes the computed
// error negligible.
// The speed of convergence depends on how often the metrics collector
// syncs with the current value. Currently, the interval is set to 5s.
func weightedAverage(prevValue, value int64) int64 {
return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value))) return int64(math.Floor(beta*float64(prevValue) + (1-beta)*float64(value)))
} }
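
To make the convergence behaviour above concrete, here is a standalone sketch of the same smoothing step; the beta value of 0.9 is an assumption for the illustration, not necessarily the collector's constant:

```go
package main

import (
	"fmt"
	"math"
)

// beta is assumed to be 0.9 for this illustration only.
const beta = 0.9

func weightedAverage(prevValue, value int64) int64 {
	return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
}

func main() {
	// Start at 1400m CPU and feed a constant reading of 900m: every sync moves
	// the smoothed value 10% of the remaining distance towards 900.
	avg := int64(1400)
	for i := 0; i < 60; i++ {
		avg = weightedAverage(avg, 900)
	}
	fmt.Println(avg) // ends up within a few units of 900
}
```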
func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) { func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
mc.mu.RLock() mc.mu.Lock()
defer mc.mu.RUnlock() defer mc.mu.Unlock()
allNodesUsage := make(map[string]api.ReferencedResourceList)
for nodeName := range mc.nodes {
allNodesUsage[nodeName] = api.ReferencedResourceList{
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
}
}
return allNodesUsage, nil
}
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
mc.mu.RLock()
defer mc.mu.RUnlock()
if _, exists := mc.nodes[node.Name]; !exists { if _, exists := mc.nodes[node.Name]; !exists {
klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node)) klog.V(4).Infof("unable to find node %q in the collected metrics", node.Name)
return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name) return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
} }
return api.ReferencedResourceList{ return map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()), v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()), v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
}, nil }, nil
} }
func (mc *MetricsCollector) HasSynced() bool {
return mc.hasSynced
}
func (mc *MetricsCollector) MetricsClient() metricsclient.Interface { func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
return mc.metricsClientset return mc.metricsClientset
} }
@@ -118,21 +86,21 @@ func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
func (mc *MetricsCollector) Collect(ctx context.Context) error { func (mc *MetricsCollector) Collect(ctx context.Context) error {
mc.mu.Lock() mc.mu.Lock()
defer mc.mu.Unlock() defer mc.mu.Unlock()
nodes, err := mc.nodeLister.List(mc.nodeSelector) nodes, err := mc.clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil { if err != nil {
return fmt.Errorf("unable to list nodes: %v", err) return fmt.Errorf("unable to list nodes: %v", err)
} }
for _, node := range nodes { for _, node := range nodes.Items {
metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{}) metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil { if err != nil {
klog.ErrorS(err, "Error fetching metrics", "node", node.Name) fmt.Printf("Error fetching metrics for node %s: %v\n", node.Name, err)
// No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV // No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV
continue continue
} }
if _, exists := mc.nodes[node.Name]; !exists { if _, exists := mc.nodes[node.Name]; !exists {
mc.nodes[node.Name] = api.ReferencedResourceList{ mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()), v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()), v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
} }
@@ -141,12 +109,17 @@ func (mc *MetricsCollector) Collect(ctx context.Context) error {
mc.nodes[node.Name][v1.ResourceCPU].SetMilli( mc.nodes[node.Name][v1.ResourceCPU].SetMilli(
weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()), weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()),
) )
mc.nodes[node.Name][v1.ResourceMemory].Set( mc.nodes[node.Name][v1.ResourceMemory].SetMilli(
weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].Value(), metrics.Usage.Memory().Value()), weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].MilliValue(), metrics.Usage.Memory().MilliValue()),
) )
} }
// Display CPU and memory usage
// fmt.Printf("%s: %vm, %vMi\n", node.Name, metrics.Usage.Cpu().MilliValue(), metrics.Usage.Memory().Value()/(1024*1024))
// fmt.Printf("%s: %vm, %vMi\n", node.Name, mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), mc.nodes[node.Name][v1.ResourceMemory].Value()/(1024*1024))
} }
mc.hasSynced = true fmt.Printf("--\n")
return nil return nil
} }

View File

@@ -18,30 +18,57 @@ package metricscollector
import ( import (
"context" "context"
"math" "os"
"testing" "testing"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake" fakeclientset "k8s.io/client-go/kubernetes/fake"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake" fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) { func TestMetricsCollector1(t *testing.T) {
kubeconfig := os.Getenv("KUBECONFIG")
// Use the kubeconfig to build the Kubernetes client
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
panic(err.Error())
}
// Create the standard Kubernetes clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
// Create the metrics clientset to access the metrics.k8s.io API
metricsClientset, err := metricsclient.NewForConfig(config)
if err != nil {
panic(err.Error())
}
collector := NewMetricsCollector(clientset, metricsClientset)
collector.Run(context.TODO())
// collector.Collect(context.TODO())
}
func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue()) t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
if usage[v1.ResourceCPU].MilliValue() != millicpu { if usage[v1.ResourceCPU].MilliValue() != millicpu {
t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue()) t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
} }
} }
func TestMetricsCollector(t *testing.T) { func TestMetricsCollector2(t *testing.T) {
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"} gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodemetricses"}
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil) n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil) n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -52,24 +79,13 @@ func TestMetricsCollector(t *testing.T) {
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816) n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3) clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
metricsClientset := fakemetricsclient.NewSimpleClientset() metricsClientset := fakemetricsclient.NewSimpleClientset(n1metrics, n2metrics, n3metrics)
metricsClientset.Tracker().Create(gvr, n1metrics, "")
metricsClientset.Tracker().Create(gvr, n2metrics, "")
metricsClientset.Tracker().Create(gvr, n3metrics, "")
ctx := context.TODO()
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
t.Logf("Set initial node cpu usage to 1400") t.Logf("Set initial node cpu usage to 1400")
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything()) collector := NewMetricsCollector(clientset, metricsClientset)
collector.Collect(context.TODO()) collector.Collect(context.TODO())
nodesUsage, _ := collector.NodeUsage(n2) nodesUsage, _ := collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1400) checkCpuNodeUsage(t, nodesUsage, 1400)
allnodesUsage, _ := collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
t.Logf("Set current node cpu usage to 500") t.Logf("Set current node cpu usage to 500")
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI) n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI)
@@ -77,66 +93,11 @@ func TestMetricsCollector(t *testing.T) {
collector.Collect(context.TODO()) collector.Collect(context.TODO())
nodesUsage, _ = collector.NodeUsage(n2) nodesUsage, _ = collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1310) checkCpuNodeUsage(t, nodesUsage, 1310)
allnodesUsage, _ = collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1310)
t.Logf("Set current node cpu usage to 900") t.Logf("Set current node cpu usage to 500")
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI) n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
metricsClientset.Tracker().Update(gvr, n2metrics, "") metricsClientset.Tracker().Update(gvr, n2metrics, "")
collector.Collect(context.TODO()) collector.Collect(context.TODO())
nodesUsage, _ = collector.NodeUsage(n2) nodesUsage, _ = collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1269) checkCpuNodeUsage(t, nodesUsage, 1268)
allnodesUsage, _ = collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1269)
}
func TestMetricsCollectorConvergence(t *testing.T) {
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
metricsClientset := fakemetricsclient.NewSimpleClientset()
metricsClientset.Tracker().Create(gvr, n1metrics, "")
metricsClientset.Tracker().Create(gvr, n2metrics, "")
metricsClientset.Tracker().Create(gvr, n3metrics, "")
ctx := context.TODO()
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
t.Logf("Set initial node cpu usage to 1400")
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
collector.Collect(context.TODO())
nodesUsage, _ := collector.NodeUsage(n2)
checkCpuNodeUsage(t, nodesUsage, 1400)
allnodesUsage, _ := collector.AllNodesUsage()
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
t.Logf("Set current node cpu/memory usage to 900/1614978816 and wait until it converges to it")
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
n2metrics.Usage[v1.ResourceMemory] = *resource.NewQuantity(1614978816, resource.BinarySI)
metricsClientset.Tracker().Update(gvr, n2metrics, "")
converged := false
for i := 0; i < 300; i++ {
collector.Collect(context.TODO())
nodesUsage, _ = collector.NodeUsage(n2)
if math.Abs(float64(900-nodesUsage[v1.ResourceCPU].MilliValue())) < 6 && math.Abs(float64(1614978816-nodesUsage[v1.ResourceMemory].Value())) < 6 {
t.Logf("Node cpu/memory usage converged to 900+-5/1614978816+-5")
converged = true
break
}
t.Logf("The current node usage: cpu=%v, memory=%v", nodesUsage[v1.ResourceCPU].MilliValue(), nodesUsage[v1.ResourceMemory].Value())
}
if !converged {
t.Fatalf("The node usage did not converged to 900+-1")
}
} }

View File

@@ -30,7 +30,6 @@ import (
listersv1 "k8s.io/client-go/listers/core/v1" listersv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
@@ -214,7 +213,7 @@ func IsNodeUnschedulable(node *v1.Node) bool {
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) { func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
// Get pod requests // Get pod requests
podRequests, _ := utils.PodRequestsAndLimits(pod) podRequests, _ := utils.PodRequestsAndLimits(pod)
resourceNames := []v1.ResourceName{v1.ResourcePods} resourceNames := make([]v1.ResourceName, 0, len(podRequests))
for name := range podRequests { for name := range podRequests {
resourceNames = append(resourceNames, name) resourceNames = append(resourceNames, name)
} }
@@ -237,7 +236,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
} }
} }
	// check pod num, at least one pod slot must be available
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 { if availableResources[v1.ResourcePods].MilliValue() <= 0 {
return false, fmt.Errorf("insufficient %v", v1.ResourcePods) return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
} }
@@ -245,7 +244,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
} }
// nodeAvailableResources returns resources mapped to the quantity available on the node.
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) { func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil) podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -254,18 +253,13 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
if err != nil { if err != nil {
return nil, err return nil, err
} }
remainingResources := api.ReferencedResourceList{} remainingResources := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
}
for _, name := range resourceNames { for _, name := range resourceNames {
if IsBasicResource(name) { if !IsBasicResource(name) {
switch name {
case v1.ResourceCPU:
remainingResources[name] = resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI)
case v1.ResourceMemory:
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI)
case v1.ResourcePods:
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI)
}
} else {
if _, exists := node.Status.Allocatable[name]; exists { if _, exists := node.Status.Allocatable[name]; exists {
allocatableResource := node.Status.Allocatable[name] allocatableResource := node.Status.Allocatable[name]
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI) remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
@@ -279,17 +273,14 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
} }
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated. // NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) { func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
totalUtilization := api.ReferencedResourceList{} totalUtilization := map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
}
for _, name := range resourceNames { for _, name := range resourceNames {
switch name { if !IsBasicResource(name) {
case v1.ResourceCPU:
totalUtilization[name] = resource.NewMilliQuantity(0, resource.DecimalSI)
case v1.ResourceMemory:
totalUtilization[name] = resource.NewQuantity(0, resource.BinarySI)
case v1.ResourcePods:
totalUtilization[name] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
default:
totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI) totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
} }
} }
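
The hunk above changes only the container type of the result; the underlying computation stays a plain per-resource sum of pod requests. A standalone sketch of that idea (not the descheduler's own helper):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// sumRequests adds up the CPU/memory requested by a set of pods plus the pod
// count, which is the core of what NodeUtilization computes above.
func sumRequests(pods []*v1.Pod) map[v1.ResourceName]*resource.Quantity {
	total := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
		v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
		v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
	}
	for _, pod := range pods {
		for _, container := range pod.Spec.Containers {
			if cpu, ok := container.Resources.Requests[v1.ResourceCPU]; ok {
				total[v1.ResourceCPU].Add(cpu)
			}
			if mem, ok := container.Resources.Requests[v1.ResourceMemory]; ok {
				total[v1.ResourceMemory].Add(mem)
			}
		}
	}
	return total
}

func main() {
	usage := sumRequests(nil)
	fmt.Println(usage[v1.ResourcePods].String()) // "0" for an empty pod list
}
```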

View File

@@ -19,7 +19,6 @@ package descheduler
import ( import (
"context" "context"
"fmt" "fmt"
"net/url"
"os" "os"
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -63,22 +62,21 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
if err != nil { if err != nil {
return nil, err return nil, err
} }
return setDefaults(*internalPolicy, registry, client)
setDefaults(*internalPolicy, registry, client)
return internalPolicy, nil
} }
func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) (*api.DeschedulerPolicy, error) { func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) *api.DeschedulerPolicy {
var err error
for idx, profile := range in.Profiles { for idx, profile := range in.Profiles {
// If we need to set defaults coming from loadtime in each profile we do it here // If we need to set defaults coming from loadtime in each profile we do it here
in.Profiles[idx], err = setDefaultEvictor(profile, client) in.Profiles[idx] = setDefaultEvictor(profile, client)
if err != nil {
return nil, err
}
for _, pluginConfig := range profile.PluginConfigs { for _, pluginConfig := range profile.PluginConfigs {
setDefaultsPluginConfig(&pluginConfig, registry) setDefaultsPluginConfig(&pluginConfig, registry)
} }
} }
return &in, nil return &in
} }
func setDefaultsPluginConfig(pluginConfig *api.PluginConfig, registry pluginregistry.Registry) { func setDefaultsPluginConfig(pluginConfig *api.PluginConfig, registry pluginregistry.Registry) {
@@ -99,7 +97,7 @@ func findPluginName(names []string, key string) bool {
return false return false
} }
func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) (api.DeschedulerProfile, error) { func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) api.DeschedulerProfile {
newPluginConfig := api.PluginConfig{ newPluginConfig := api.PluginConfig{
Name: defaultevictor.PluginName, Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{ Args: &defaultevictor.DefaultEvictorArgs{
@@ -107,7 +105,6 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
EvictSystemCriticalPods: false, EvictSystemCriticalPods: false,
IgnorePvcPods: false, IgnorePvcPods: false,
EvictFailedBarePods: false, EvictFailedBarePods: false,
IgnorePodsWithoutPDB: false,
}, },
} }
@@ -130,19 +127,18 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), client, defaultevictorPluginConfig.Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold) thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), client, defaultevictorPluginConfig.Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold)
if err != nil { if err != nil {
klog.Error(err, "Failed to get threshold priority from args") klog.Error(err, "Failed to get threshold priority from args")
return profile, err
} }
profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold = &api.PriorityThreshold{} profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold = &api.PriorityThreshold{}
profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold.Value = &thresholdPriority profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold.Value = &thresholdPriority
return profile, nil return profile
} }
func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error { func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
var errorsInPolicy []error var errorsInProfiles []error
for _, profile := range in.Profiles { for _, profile := range in.Profiles {
for _, pluginConfig := range profile.PluginConfigs { for _, pluginConfig := range profile.PluginConfigs {
if _, ok := registry[pluginConfig.Name]; !ok { if _, ok := registry[pluginConfig.Name]; !ok {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name)) errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
continue continue
} }
@@ -151,46 +147,9 @@ func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginr
continue continue
} }
if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil { if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: %s", profile.Name, err.Error())) errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
} }
} }
} }
providers := map[api.MetricsSource]api.MetricsProvider{} return utilerrors.NewAggregate(errorsInProfiles)
for _, provider := range in.MetricsProviders {
if _, ok := providers[provider.Source]; ok {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("metric provider %q is already configured, each source can be configured only once", provider.Source))
} else {
providers[provider.Source] = provider
}
}
if _, exists := providers[api.KubernetesMetrics]; exists && in.MetricsCollector != nil && in.MetricsCollector.Enabled {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"))
}
if prometheusConfig, exists := providers[api.PrometheusMetrics]; exists {
if prometheusConfig.Prometheus == nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus configuration is required when prometheus source is enabled"))
} else {
if prometheusConfig.Prometheus.URL == "" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL is required when prometheus is enabled"))
} else {
u, err := url.Parse(prometheusConfig.Prometheus.URL)
if err != nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
} else if u.Scheme != "https" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL's scheme is not https, got %q instead", u.Scheme))
}
}
if prometheusConfig.Prometheus.AuthToken != nil {
secretRef := prometheusConfig.Prometheus.AuthToken.SecretReference
if secretRef == nil {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"))
} else if secretRef.Name == "" || secretRef.Namespace == "" {
errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"))
}
}
}
}
return utilerrors.NewAggregate(errorsInPolicy)
} }

View File

@@ -17,7 +17,6 @@ limitations under the License.
package descheduler package descheduler
import ( import (
"errors"
"fmt" "fmt"
"testing" "testing"
@@ -122,25 +121,6 @@ profiles:
}, },
}, },
}, },
{
description: "v1alpha2 to internal, validate error handling (priorityThreshold exceeding maximum)",
policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
args:
priorityThreshold:
value: 2000000001
plugins:
deschedule:
enabled:
- "RemovePodsHavingTooManyRestarts"
`),
result: nil,
err: errors.New("priority threshold can't be greater than 2000000000"),
},
} }
for _, tc := range testCases { for _, tc := range testCases {
@@ -211,167 +191,12 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
}, },
result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"), result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
}, },
{
description: "Duplicit metrics providers error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{Source: api.KubernetesMetrics},
{Source: api.KubernetesMetrics},
},
},
result: fmt.Errorf("metric provider \"KubernetesMetrics\" is already configured, each source can be configured only once"),
},
{
description: "Too many metrics providers error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsCollector: &api.MetricsCollector{
Enabled: true,
},
MetricsProviders: []api.MetricsProvider{
{Source: api.KubernetesMetrics},
},
},
result: fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"),
},
{
description: "missing prometheus url error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{},
},
},
},
result: fmt.Errorf("prometheus URL is required when prometheus is enabled"),
},
{
description: "prometheus url is not valid error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "http://example.com:-80",
},
},
},
},
result: fmt.Errorf("error parsing prometheus URL: parse \"http://example.com:-80\": invalid port \":-80\" after host"),
},
{
description: "prometheus url does not have https error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "http://example.com:80",
},
},
},
},
result: fmt.Errorf("prometheus URL's scheme is not https, got \"http\" instead"),
},
{
description: "prometheus authtoken with no secret reference error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "https://example.com:80",
AuthToken: &api.AuthToken{},
},
},
},
},
result: fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"),
},
{
description: "prometheus authtoken with empty secret reference error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "https://example.com:80",
AuthToken: &api.AuthToken{
SecretReference: &api.SecretReference{},
},
},
},
},
},
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
},
{
description: "prometheus authtoken missing secret reference namespace error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "https://example.com:80",
AuthToken: &api.AuthToken{
SecretReference: &api.SecretReference{
Name: "secretname",
},
},
},
},
},
},
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
},
{
description: "prometheus authtoken missing secret reference name error",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "https://example.com:80",
AuthToken: &api.AuthToken{
SecretReference: &api.SecretReference{
Namespace: "secretnamespace",
},
},
},
},
},
},
result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
},
{
description: "valid prometheus authtoken secret reference",
deschedulerPolicy: api.DeschedulerPolicy{
MetricsProviders: []api.MetricsProvider{
{
Source: api.PrometheusMetrics,
Prometheus: &api.Prometheus{
URL: "https://example.com:80",
AuthToken: &api.AuthToken{
SecretReference: &api.SecretReference{
Name: "secretname",
Namespace: "secretnamespace",
},
},
},
},
},
},
},
} }
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) { t.Run(tc.description, func(t *testing.T) {
result := validateDeschedulerConfiguration(tc.deschedulerPolicy, pluginregistry.PluginRegistry) result := validateDeschedulerConfiguration(tc.deschedulerPolicy, pluginregistry.PluginRegistry)
if result == nil && tc.result != nil || result != nil && tc.result == nil { if result.Error() != tc.result.Error() {
t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
} else if result == nil && tc.result == nil {
return
} else if result.Error() != tc.result.Error() {
t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result) t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
} }
}) })

View File

@@ -1,49 +0,0 @@
package features
import (
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/component-base/featuregate"
)
const (
// Every feature gate should add method here following this template:
//
// // owner: @username
// // kep: kep link
// // alpha: v1.X
// MyFeature featuregate.Feature = "MyFeature"
//
// Feature gates should be listed in alphabetical, case-sensitive
// (upper before any lower case character) order. This reduces the risk
// of code conflicts because changes are more likely to be scattered
// across the file.
// owner: @ingvagabund
// kep: https://github.com/kubernetes-sigs/descheduler/issues/1397
// alpha: v1.31
//
// Enable evictions in background so users can create their own eviction policies
// as an alternative to immediate evictions.
EvictionsInBackground featuregate.Feature = "EvictionsInBackground"
)
func init() {
runtime.Must(DefaultMutableFeatureGate.Add(defaultDeschedulerFeatureGates))
}
// defaultDeschedulerFeatureGates consists of all known descheduler-specific feature keys.
// To add a new feature, define a key for it above and add it here. The features will be
// available throughout descheduler binary.
//
// Entries are separated from each other with blank lines to avoid sweeping gofmt changes
// when adding or removing one entry.
var defaultDeschedulerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
}
// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
// Tests that need to modify feature gates for the duration of their test should use:
//
// defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)()
var DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate()
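
For orientation, flipping and querying one of these gates could look like the following sketch; the import path pkg/features is assumed:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/features"
)

func main() {
	// EvictionsInBackground is alpha and therefore off by default; flipping it
	// goes through the mutable gate, typically from a command-line option.
	if err := features.DefaultMutableFeatureGate.Set("EvictionsInBackground=true"); err != nil {
		panic(err)
	}
	fmt.Println(features.DefaultMutableFeatureGate.Enabled(features.EvictionsInBackground))
}
```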

View File

@@ -11,8 +11,6 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector" "sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types" frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
promapi "github.com/prometheus/client_golang/api"
) )
type HandleImpl struct { type HandleImpl struct {
@@ -21,8 +19,7 @@ type HandleImpl struct {
SharedInformerFactoryImpl informers.SharedInformerFactory SharedInformerFactoryImpl informers.SharedInformerFactory
EvictorFilterImpl frameworktypes.EvictorPlugin EvictorFilterImpl frameworktypes.EvictorPlugin
PodEvictorImpl *evictions.PodEvictor PodEvictorImpl *evictions.PodEvictor
MetricsCollectorImpl *metricscollector.MetricsCollector MetricsCollectorImpl *metricscollector.MetricsCollector
PrometheusClientImpl promapi.Client
} }
var _ frameworktypes.Handle = &HandleImpl{} var _ frameworktypes.Handle = &HandleImpl{}
@@ -31,10 +28,6 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
return hi.ClientsetImpl return hi.ClientsetImpl
} }
func (hi *HandleImpl) PrometheusClient() promapi.Client {
return hi.PrometheusClientImpl
}
func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector { func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
return hi.MetricsCollectorImpl return hi.MetricsCollectorImpl
} }

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2025 The Kubernetes Authors. Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

View File

@@ -195,20 +195,6 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
return nil return nil
}) })
} }
if defaultEvictorArgs.IgnorePodsWithoutPDB {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
hasPdb, err := utils.IsPodCoveredByPDB(pod, handle.SharedInformerFactory().Policy().V1().PodDisruptionBudgets().Lister())
if err != nil {
return fmt.Errorf("unable to check if pod is covered by PodDisruptionBudget: %w", err)
}
if !hasPdb {
return fmt.Errorf("no PodDisruptionBudget found for pod")
}
return nil
})
}
return ev, nil return ev, nil
} }

View File

@@ -22,7 +22,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
@@ -40,17 +39,14 @@ type testCase struct {
description string description string
pods []*v1.Pod pods []*v1.Pod
nodes []*v1.Node nodes []*v1.Node
pdbs []*policyv1.PodDisruptionBudget
evictFailedBarePods bool evictFailedBarePods bool
evictLocalStoragePods bool evictLocalStoragePods bool
evictSystemCriticalPods bool evictSystemCriticalPods bool
ignorePvcPods bool
priorityThreshold *int32 priorityThreshold *int32
nodeFit bool nodeFit bool
minReplicas uint minReplicas uint
minPodAge *metav1.Duration minPodAge *metav1.Duration
result bool result bool
ignorePodsWithoutPDB bool
} }
func TestDefaultEvictorPreEvictionFilter(t *testing.T) { func TestDefaultEvictorPreEvictionFilter(t *testing.T) {
@@ -743,65 +739,6 @@ func TestDefaultEvictorFilter(t *testing.T) {
}), }),
}, },
result: true, result: true,
}, {
description: "ignorePodsWithoutPDB, pod with no PDBs, no eviction",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Labels = map[string]string{
"app": "foo",
}
}),
},
ignorePodsWithoutPDB: true,
result: false,
}, {
description: "ignorePodsWithoutPDB, pod with PDBs, evicts",
pods: []*v1.Pod{
test.BuildTestPod("p1", 1, 1, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Labels = map[string]string{
"app": "foo",
}
}),
},
pdbs: []*policyv1.PodDisruptionBudget{
test.BuildTestPDB("pdb1", "foo"),
},
ignorePodsWithoutPDB: true,
result: true,
}, {
description: "ignorePvcPods is set, pod with PVC, not evicts",
pods: []*v1.Pod{
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "pvc", VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
},
},
}
}),
},
ignorePvcPods: true,
result: false,
}, {
description: "ignorePvcPods is not set, pod with PVC, evicts",
pods: []*v1.Pod{
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod.Spec.Volumes = []v1.Volume{
{
Name: "pvc", VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
},
},
}
}),
},
ignorePvcPods: false,
result: true,
}, },
} }
@@ -874,15 +811,11 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
for _, pod := range test.pods { for _, pod := range test.pods {
objs = append(objs, pod) objs = append(objs, pod)
} }
for _, pdb := range test.pdbs {
objs = append(objs, pdb)
}
fakeClient := fake.NewSimpleClientset(objs...) fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer() podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
_ = sharedInformerFactory.Policy().V1().PodDisruptionBudgets().Lister()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer) getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil { if err != nil {
@@ -895,15 +828,14 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
defaultEvictorArgs := &DefaultEvictorArgs{ defaultEvictorArgs := &DefaultEvictorArgs{
EvictLocalStoragePods: test.evictLocalStoragePods, EvictLocalStoragePods: test.evictLocalStoragePods,
EvictSystemCriticalPods: test.evictSystemCriticalPods, EvictSystemCriticalPods: test.evictSystemCriticalPods,
IgnorePvcPods: test.ignorePvcPods, IgnorePvcPods: false,
EvictFailedBarePods: test.evictFailedBarePods, EvictFailedBarePods: test.evictFailedBarePods,
PriorityThreshold: &api.PriorityThreshold{ PriorityThreshold: &api.PriorityThreshold{
Value: test.priorityThreshold, Value: test.priorityThreshold,
}, },
NodeFit: test.nodeFit, NodeFit: test.nodeFit,
MinReplicas: test.minReplicas, MinReplicas: test.minReplicas,
MinPodAge: test.minPodAge, MinPodAge: test.minPodAge,
IgnorePodsWithoutPDB: test.ignorePodsWithoutPDB,
} }
evictorPlugin, err := New( evictorPlugin, err := New(

View File

@@ -42,7 +42,6 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
LabelSelector: nil, LabelSelector: nil,
PriorityThreshold: nil, PriorityThreshold: nil,
NodeFit: false, NodeFit: false,
IgnorePodsWithoutPDB: false,
}, },
}, },
{ {
@@ -58,8 +57,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
PriorityThreshold: &api.PriorityThreshold{ PriorityThreshold: &api.PriorityThreshold{
Value: utilptr.To[int32](800), Value: utilptr.To[int32](800),
}, },
NodeFit: true, NodeFit: true,
IgnorePodsWithoutPDB: true,
}, },
want: &DefaultEvictorArgs{ want: &DefaultEvictorArgs{
NodeSelector: "NodeSelector", NodeSelector: "NodeSelector",
@@ -72,8 +70,7 @@ func TestSetDefaults_DefaultEvictorArgs(t *testing.T) {
PriorityThreshold: &api.PriorityThreshold{ PriorityThreshold: &api.PriorityThreshold{
Value: utilptr.To[int32](800), Value: utilptr.To[int32](800),
}, },
NodeFit: true, NodeFit: true,
IgnorePodsWithoutPDB: true,
}, },
}, },
} }

View File

@@ -36,5 +36,4 @@ type DefaultEvictorArgs struct {
NodeFit bool `json:"nodeFit,omitempty"` NodeFit bool `json:"nodeFit,omitempty"`
MinReplicas uint `json:"minReplicas,omitempty"` MinReplicas uint `json:"minReplicas,omitempty"`
MinPodAge *metav1.Duration `json:"minPodAge,omitempty"` MinPodAge *metav1.Duration `json:"minPodAge,omitempty"`
IgnorePodsWithoutPDB bool `json:"ignorePodsWithoutPDB,omitempty"`
} }

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2025 The Kubernetes Authors. Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2025 The Kubernetes Authors. Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

View File

@@ -1,90 +0,0 @@
# Descheduler Plugin: Example Implementation
This directory provides an example plugin for the Kubernetes Descheduler,
demonstrating how to evict pods based on custom criteria. The plugin targets
pods based on:
* **Name Regex:** Pods matching a specified regular expression.
* **Age:** Pods older than a defined duration.
* **Namespace:** Pods within or outside a given list of namespaces (inclusion
or exclusion).
## Building and Integrating the Plugin
To incorporate this plugin into your Descheduler build, you must register it
within the Descheduler's plugin registry. Follow these steps:
1. **Register the Plugin:**
* Modify the `pkg/descheduler/setupplugins.go` file.
* Add the following registration line to the end of the
`RegisterDefaultPlugins()` function:
```go
pluginregistry.Register(
example.PluginName,
example.New,
&example.Example{},
&example.ExampleArgs{},
example.ValidateExampleArgs,
example.SetDefaults_Example,
registry,
)
```
2. **Generate Code:**
* If you modify the plugin's code, execute `make gen` before rebuilding the
Descheduler. This ensures generated code is up-to-date.
3. **Rebuild the Descheduler:**
* Build the descheduler with your changes.
## Plugin Configuration
Configure the plugin's behavior using the Descheduler's policy configuration.
Here's an example:
```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
- name: LifecycleAndUtilization
plugins:
deschedule:
enabled:
- Example
pluginConfig:
- name: Example
args:
regex: ^descheduler-test.*$
maxAge: 3m
namespaces:
include:
- default
```
## Explanation
- `regex: ^descheduler-test.*$`: Evicts pods whose names match the regular
expression `^descheduler-test.*$`.
- `maxAge: 3m`: Evicts pods older than 3 minutes.
- `namespaces.include: - default`: Evicts pods within the default namespace.
This configuration will cause the plugin to evict pods that meet all three
criteria: matching the `regex`, exceeding the `maxAge`, and residing in the
specified namespace.
## Notes
- This plugin is configured through the `ExampleArgs` struct, which defines the
plugin's parameters.
- Plugins must implement a function to validate and another to set the default
values for their `Args` struct.
- The fields in the `ExampleArgs` struct reflect directly into the
`DeschedulerPolicy` configuration.
- Plugins must comply with the `DeschedulePlugin` interface to be registered
with the Descheduler.
- The main functionality of the plugin is implemented in the `Deschedule()`
method, which is called by the Descheduler when the plugin is executed.
- A good amount of descheduling logic can be achieved by means of filters; see
  the sketch after these notes.
- Whenever a change in the Plugin's configuration is made the developer should
regenerate the code by running `make gen`.
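
As a rough illustration of the note on filters above, a predicate combining the name regex and the age cutoff can be kept separate from the eviction loop. This sketch is not part of the plugin; it only mirrors its two criteria, using field names from `k8s.io/api`:

```go
package main

import (
	"fmt"
	"regexp"
	"time"

	v1 "k8s.io/api/core/v1"
)

// matchesPolicy returns a filter that admits only pods whose name matches the
// regex and whose age exceeds maxAge, mirroring the plugin's two criteria.
func matchesPolicy(re *regexp.Regexp, maxAge time.Duration) func(*v1.Pod) bool {
	return func(pod *v1.Pod) bool {
		if !re.MatchString(pod.Name) {
			return false
		}
		return time.Since(pod.CreationTimestamp.Time) > maxAge
	}
}

func main() {
	filter := matchesPolicy(regexp.MustCompile(`^descheduler-test.*$`), 3*time.Minute)
	fmt.Println(filter(&v1.Pod{})) // false: the empty name does not match
}
```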

View File

@@ -1,36 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package example
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_Example sets the default arguments for the Example plugin. In
// this case we set the default regex to match only empty strings (this should
// not ever match anything). The default maximum age for pods is set to 5
// minutes.
func SetDefaults_Example(obj runtime.Object) {
args := obj.(*ExampleArgs)
if args.Regex == "" {
args.Regex = "^$"
}
if args.MaxAge == "" {
args.MaxAge = "5m"
}
}
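
A quick check of that defaulting behaviour could look like the following test-style sketch (illustrative, not part of the plugin's files):

```go
package example

import "testing"

// TestSetDefaultsExample verifies the fallbacks documented above: an empty
// ExampleArgs picks up the match-nothing regex and the 5 minute max age.
func TestSetDefaultsExample(t *testing.T) {
	args := &ExampleArgs{}
	SetDefaults_Example(args)
	if args.Regex != "^$" || args.MaxAge != "5m" {
		t.Fatalf("unexpected defaults: regex=%q maxAge=%q", args.Regex, args.MaxAge)
	}
}
```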

View File

@@ -1,16 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:defaulter-gen=TypeMeta
package example

View File

@@ -1,170 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package example
import (
"context"
"fmt"
"regexp"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
fwtypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)
// PluginName is used when registering the plugin. You need to choose a unique
// name across all plugins. This name is used to identify the plugin config in
// the descheduler policy.
const PluginName = "Example"
// We need to ensure that the plugin struct complies with the DeschedulePlugin
// interface. This prevent unexpected changes that may render this type
// incompatible.
var _ fwtypes.DeschedulePlugin = &Example{}
// Example is our plugin (implementing the DeschedulePlugin interface). This
// plugin will evict pods that match a regex and are older than a certain age.
type Example struct {
handle fwtypes.Handle
args *ExampleArgs
podFilter podutil.FilterFunc
}
// New builds a plugin instance from its arguments. Arguments are passed in as
// a runtime.Object. Handle is used by plugins to retrieve a kubernetes client
// set, evictor interface, shared informer factory and other instruments shared
// across different plugins.
func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
// make sure we are receiving the right argument type.
exampleArgs, ok := args.(*ExampleArgs)
if !ok {
return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
}
// we can use the included and excluded namespaces to filter the pods we want
// to evict.
var includedNamespaces, excludedNamespaces sets.Set[string]
if exampleArgs.Namespaces != nil {
includedNamespaces = sets.New(exampleArgs.Namespaces.Include...)
excludedNamespaces = sets.New(exampleArgs.Namespaces.Exclude...)
}
// here we create a pod filter that will return only pods that can be
// evicted (according to the evictor and inside the namespaces we want).
// NOTE: here we could also add a function to filter out by the regex and
// age but for sake of the example we are keeping it simple and filtering
// those out in the Deschedule() function.
podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithFilter(
podutil.WrapFilterFuncs(
handle.Evictor().Filter,
handle.Evictor().PreEvictionFilter,
),
).
BuildFilterFunc()
if err != nil {
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}
return &Example{
handle: handle,
podFilter: podFilter,
args: exampleArgs,
}, nil
}
// Name returns the plugin name.
func (d *Example) Name() string {
return PluginName
}
// Deschedule is the function where most of the logic around eviction is laid
// down. Here we go through all pods in all nodes and evict the ones that match
// the regex and are older than the maximum age. This function receives a list
// of nodes we need to process.
func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
var podsToEvict []*v1.Pod
logger := klog.FromContext(ctx)
logger.Info("Example plugin starting descheduling")
re, err := regexp.Compile(d.args.Regex)
if err != nil {
err = fmt.Errorf("fail to compile regex: %w", err)
return &fwtypes.Status{Err: err}
}
duration, err := time.ParseDuration(d.args.MaxAge)
if err != nil {
err = fmt.Errorf("fail to parse max age: %w", err)
return &fwtypes.Status{Err: err}
}
	// here we create an auxiliary filter to remove all pods that don't
// match the provided regex or are still too young to be evicted.
// This filter will be used when we list all pods on a node. This
// filter here could have been part of the podFilter but we are
// keeping it separate for the sake of the example.
filter := func(pod *v1.Pod) bool {
if !re.MatchString(pod.Name) {
return false
}
deadline := pod.CreationTimestamp.Add(duration)
return time.Now().After(deadline)
}
// go node by node getting all pods that we can evict.
for _, node := range nodes {
// ListAllPodsOnANode is a helper function that retrieves all
// pods filtering out the ones we can't evict. We merge the
// default filters with the one we created above.
pods, err := podutil.ListAllPodsOnANode(
node.Name,
d.handle.GetPodsAssignedToNodeFunc(),
podutil.WrapFilterFuncs(d.podFilter, filter),
)
if err != nil {
err = fmt.Errorf("fail to list pods: %w", err)
return &fwtypes.Status{Err: err}
}
// as we have already filtered out pods that don't match the
// regex or are too young we can simply add them all to the
// eviction list.
podsToEvict = append(podsToEvict, pods...)
}
// evict all the pods.
for _, pod := range podsToEvict {
logger.Info("Example plugin evicting pod", "pod", klog.KObj(pod))
opts := evictions.EvictOptions{StrategyName: PluginName}
if err := d.handle.Evictor().Evict(ctx, pod, opts); err != nil {
logger.Error(err, "unable to evict pod", "pod", klog.KObj(pod))
}
}
logger.Info("Example plugin finished descheduling")
return nil
}

View File

@@ -1,31 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package example
import (
"k8s.io/apimachinery/pkg/runtime"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder()
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addDefaultingFuncs)
}
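The addDefaultingFuncs helper registered above is not part of this listing. A minimal sketch of what it and a companion defaulting function could look like in the same example package follows; the SetDefaults_ExampleArgs name and the 24h default are assumptions made for illustration only:

// addDefaultingFuncs hooks the (generated) defaulting registration into the
// scheme builder above. Sketch only, assuming the usual defaulter-gen layout.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}

// SetDefaults_ExampleArgs fills in values the user left empty. The 24h
// default below is invented purely for this example.
func SetDefaults_ExampleArgs(args *ExampleArgs) {
	if args.MaxAge == "" {
		args.MaxAge = "24h"
	}
}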

View File

@@ -1,45 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package example
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/descheduler/pkg/api"
)
// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ExampleArgs holds a list of arguments used to configure the plugin. For this
// simple example we only care about a regex, a maximum age and possibly a list
// of namespaces to which we want to apply the descheduler. This plugin evicts
// pods that match a given regular expression and are older than the maximum
// allowed age. Most of the fields here are defined as strings so we can
// validate them elsewhere (so we can show a separate validation step).
type ExampleArgs struct {
metav1.TypeMeta `json:",inline"`
// Regex is a regular expression we use to match against pod names. If
// the pod name matches the regex it will be evicted. This is expected
// to be a valid regular expression (according to go's regexp package).
Regex string `json:"regex"`
// MaxAge is the maximum age a pod can have before it is considered for
// eviction. This is expected to be a valid time.Duration.
MaxAge string `json:"maxAge"`
// Namespaces allows us to filter on which namespaces we want to apply
// the descheduler.
Namespaces *api.Namespaces `json:"namespaces,omitempty"`
}
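For reference, a minimal sketch of how these arguments could be populated from Go code; the regex, age and namespace values are invented for illustration:

// exampleArgsForDocs returns an illustrative argument set: evict pods whose
// name starts with "batch-", but only once they are older than two days and
// only in the "dev" namespace.
func exampleArgsForDocs() *ExampleArgs {
	return &ExampleArgs{
		Regex:  "^batch-.*",
		MaxAge: "48h",
		Namespaces: &api.Namespaces{
			Include: []string{"dev"},
		},
	}
}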

View File

@@ -1,45 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package example
import (
"fmt"
"regexp"
"time"
"k8s.io/apimachinery/pkg/runtime"
)
// ValidateExampleArgs validates if the plugin arguments are correct (we have
// everything we need). In this case we only validate that we have a valid
// regular expression and maximum age.
func ValidateExampleArgs(obj runtime.Object) error {
args := obj.(*ExampleArgs)
if args.Regex == "" {
return fmt.Errorf("regex argument must be set")
}
if _, err := regexp.Compile(args.Regex); err != nil {
return fmt.Errorf("invalid regex: %v", err)
}
if _, err := time.ParseDuration(args.MaxAge); err != nil {
return fmt.Errorf("invalid max age: %v", err)
}
return nil
}
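A short sketch of how this validation behaves for a malformed and a well-formed argument set (the values are made up):

func validateForDocs() {
	bad := &ExampleArgs{Regex: "(", MaxAge: "1h"}
	if err := ValidateExampleArgs(bad); err != nil {
		fmt.Println("rejected:", err) // the unbalanced "(" fails to compile
	}
	good := &ExampleArgs{Regex: "^batch-.*", MaxAge: "48h"}
	if err := ValidateExampleArgs(good); err == nil {
		fmt.Println("accepted") // a valid regex and a parseable duration pass
	}
}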

View File

@@ -1,57 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package example
import (
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExampleArgs) DeepCopyInto(out *ExampleArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(api.Namespaces)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleArgs.
func (in *ExampleArgs) DeepCopy() *ExampleArgs {
if in == nil {
return nil
}
out := new(ExampleArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ExampleArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

View File

@@ -1,33 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package example
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}

View File

@@ -1,84 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package classifier
// Classifier is a function that classifies a resource usage based on a limit.
// The function should return true if the resource usage matches the classifier
// intent.
type Classifier[K comparable, V any] func(K, V, V) bool
// Comparer is a function that compares two objects. This function should return
// -1 if the first object is less than the second, 0 if they are equal, and 1 if
// the first object is greater than the second. This is a simplification: in
// practice any negative, zero, or positive value may be returned.
type Comparer[V any] func(V, V) int
// Values is a map of values indexed by a comparable key. An example of this
// can be a list of resources indexed by a node name.
type Values[K comparable, V any] map[K]V
// Limits is a map of lists of limits indexed by a comparable key. Each limit
// inside the list requires a classifier to evaluate it.
type Limits[K comparable, V any] map[K][]V
// Classify is a function that classifies based on classifier functions. This
// function receives Values, a list of n Limits (indexed by name), and a list
// of n Classifiers. The classifier at position n is called to evaluate the
// limit at position n. The first classifier to return true receives the
// value; at that point the loop breaks and the next value is evaluated.
// This function returns a slice of maps; each position in the returned
// slice corresponds to one of the classifiers (e.g. if n limits and
// classifiers are provided, the returned slice will have n maps).
func Classify[K comparable, V any](
values Values[K, V], limits Limits[K, V], classifiers ...Classifier[K, V],
) []map[K]V {
result := make([]map[K]V, len(classifiers))
for i := range classifiers {
result[i] = make(map[K]V)
}
for index, usage := range values {
for i, limit := range limits[index] {
if len(classifiers) <= i {
continue
}
if !classifiers[i](index, usage, limit) {
continue
}
result[i][index] = usage
break
}
}
return result
}
// ForMap is a function that returns a classifier that compares all values in a
// map. The function receives a Comparer function that is used to compare all
// the map values. The returned Classifier will return true only if the
// provided Comparer function returns a value less than 0 for all the values.
func ForMap[K, I comparable, V any, M ~map[I]V](cmp Comparer[V]) Classifier[K, M] {
return func(_ K, usages, limits M) bool {
for idx, usage := range usages {
if limit, ok := limits[idx]; ok {
if cmp(usage, limit) >= 0 {
return false
}
}
}
return true
}
}
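To make the generic signatures above concrete, here is a minimal sketch of Classify with plain integers; the node names and limits are invented, and the tests below exercise the same idea against v1.ResourceList:

// classifyForDocs splits nodes into an "under" and an "over" bucket. With the
// values below it returns [{"node1": 2}, {"node2": 8}].
func classifyForDocs() []map[string]int {
	usage := Values[string, int]{"node1": 2, "node2": 8}
	limits := Limits[string, int]{
		"node1": {4, 6}, // first entry: low limit, second entry: high limit
		"node2": {4, 6},
	}
	return Classify(usage, limits,
		func(_ string, usage, limit int) bool { return usage < limit }, // under
		func(_ string, usage, limit int) bool { return usage > limit }, // over
	)
}

ForMap builds such a classifier from a per-value Comparer when the usage itself is a map, as the v1.ResourceList cases in the tests below demonstrate.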

View File

@@ -1,739 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package classifier
import (
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/utils/ptr"
)
func TestClassifySimple(t *testing.T) {
for _, tt := range []struct {
name string
usage map[string]int
limits map[string][]int
classifiers []Classifier[string, int]
expected []map[string]int
}{
{
name: "empty",
usage: map[string]int{},
limits: map[string][]int{},
expected: []map[string]int{},
},
{
name: "one under one over",
usage: map[string]int{
"node1": 2,
"node2": 8,
},
limits: map[string][]int{
"node1": {4, 6},
"node2": {4, 6},
},
expected: []map[string]int{
{"node1": 2},
{"node2": 8},
},
classifiers: []Classifier[string, int]{
func(_ string, usage, limit int) bool {
return usage < limit
},
func(_ string, usage, limit int) bool {
return usage > limit
},
},
},
{
name: "randomly positioned over utilized",
usage: map[string]int{
"node1": 2,
"node2": 8,
"node3": 2,
"node4": 8,
"node5": 8,
"node6": 2,
"node7": 2,
"node8": 8,
"node9": 8,
},
limits: map[string][]int{
"node1": {4, 6},
"node2": {4, 6},
"node3": {4, 6},
"node4": {4, 6},
"node5": {4, 6},
"node6": {4, 6},
"node7": {4, 6},
"node8": {4, 6},
"node9": {4, 6},
},
expected: []map[string]int{
{
"node1": 2,
"node3": 2,
"node6": 2,
"node7": 2,
},
{
"node2": 8,
"node4": 8,
"node5": 8,
"node8": 8,
"node9": 8,
},
},
classifiers: []Classifier[string, int]{
func(_ string, usage, limit int) bool {
return usage < limit
},
func(_ string, usage, limit int) bool {
return usage > limit
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := Classify(tt.usage, tt.limits, tt.classifiers...)
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected result: %v", result)
}
})
}
}
func TestClassify_pointers(t *testing.T) {
for _, tt := range []struct {
name string
usage map[string]map[v1.ResourceName]*resource.Quantity
limits map[string][]map[v1.ResourceName]*resource.Quantity
classifiers []Classifier[string, map[v1.ResourceName]*resource.Quantity]
expected []map[string]map[v1.ResourceName]*resource.Quantity
}{
{
name: "empty",
usage: map[string]map[v1.ResourceName]*resource.Quantity{},
limits: map[string][]map[v1.ResourceName]*resource.Quantity{},
expected: []map[string]map[v1.ResourceName]*resource.Quantity{},
},
{
name: "single underutilized",
usage: map[string]map[v1.ResourceName]*resource.Quantity{
"node1": {
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
},
},
limits: map[string][]map[v1.ResourceName]*resource.Quantity{
"node1": {
{
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
},
},
},
expected: []map[string]map[v1.ResourceName]*resource.Quantity{
{
"node1": {
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
},
},
},
classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
func(usage, limit *resource.Quantity) int {
return usage.Cmp(*limit)
},
),
},
},
{
name: "single underutilized and properly utilized",
usage: map[string]map[v1.ResourceName]*resource.Quantity{
"node1": {
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
},
"node2": {
v1.ResourceCPU: ptr.To(resource.MustParse("5")),
v1.ResourceMemory: ptr.To(resource.MustParse("5Gi")),
},
"node3": {
v1.ResourceCPU: ptr.To(resource.MustParse("8")),
v1.ResourceMemory: ptr.To(resource.MustParse("8Gi")),
},
},
limits: map[string][]map[v1.ResourceName]*resource.Quantity{
"node1": {
{
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
},
{
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
},
},
"node2": {
{
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
},
{
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
},
},
"node3": {
{
v1.ResourceCPU: ptr.To(resource.MustParse("4")),
v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
},
{
v1.ResourceCPU: ptr.To(resource.MustParse("16")),
v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
},
},
},
expected: []map[string]map[v1.ResourceName]*resource.Quantity{
{
"node1": {
v1.ResourceCPU: ptr.To(resource.MustParse("2")),
v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
},
},
{},
},
classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
func(usage, limit *resource.Quantity) int {
return usage.Cmp(*limit)
},
),
ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
func(usage, limit *resource.Quantity) int {
return limit.Cmp(*usage)
},
),
},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := Classify(tt.usage, tt.limits, tt.classifiers...)
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected result: %v", result)
}
})
}
}
func TestClassify(t *testing.T) {
for _, tt := range []struct {
name string
usage map[string]v1.ResourceList
limits map[string][]v1.ResourceList
classifiers []Classifier[string, v1.ResourceList]
expected []map[string]v1.ResourceList
}{
{
name: "empty",
usage: map[string]v1.ResourceList{},
limits: map[string][]v1.ResourceList{},
expected: []map[string]v1.ResourceList{},
},
{
name: "single underutilized",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
},
},
expected: []map[string]v1.ResourceList{
{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
},
},
{
name: "less classifiers than limits",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("5"),
v1.ResourceMemory: resource.MustParse("5Gi"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("16"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
},
"node2": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("16"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
},
"node3": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("16"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
},
},
expected: []map[string]v1.ResourceList{
{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
},
},
{
name: "more classifiers than limits",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("20"),
v1.ResourceMemory: resource.MustParse("20"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("50"),
v1.ResourceMemory: resource.MustParse("50"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("80"),
v1.ResourceMemory: resource.MustParse("80"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("30"),
v1.ResourceMemory: resource.MustParse("30"),
},
},
"node2": {
{
v1.ResourceCPU: resource.MustParse("30"),
v1.ResourceMemory: resource.MustParse("30"),
},
},
"node3": {
{
v1.ResourceCPU: resource.MustParse("30"),
v1.ResourceMemory: resource.MustParse("30"),
},
},
},
expected: []map[string]v1.ResourceList{
{
"node1": {
v1.ResourceCPU: resource.MustParse("20"),
v1.ResourceMemory: resource.MustParse("20"),
},
},
{},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return limit.Cmp(usage)
},
),
},
},
{
name: "single underutilized and properly utilized",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("5"),
v1.ResourceMemory: resource.MustParse("5Gi"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("16"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
},
"node2": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("16"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
},
"node3": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("16"),
v1.ResourceMemory: resource.MustParse("16Gi"),
},
},
},
expected: []map[string]v1.ResourceList{
{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
{},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return limit.Cmp(usage)
},
),
},
},
{
name: "single underutilized and multiple over utilized nodes",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("6"),
v1.ResourceMemory: resource.MustParse("6Gi"),
},
},
"node2": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("6"),
v1.ResourceMemory: resource.MustParse("6Gi"),
},
},
"node3": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("6"),
v1.ResourceMemory: resource.MustParse("6Gi"),
},
},
},
expected: []map[string]v1.ResourceList{
{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
{
"node2": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return limit.Cmp(usage)
},
),
},
},
{
name: "over and under at the same time",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("6"),
v1.ResourceMemory: resource.MustParse("6Gi"),
},
},
"node2": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("6"),
v1.ResourceMemory: resource.MustParse("6Gi"),
},
},
},
expected: []map[string]v1.ResourceList{
{},
{},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return limit.Cmp(usage)
},
),
},
},
{
name: "only memory over utilized",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("5"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
limits: map[string][]v1.ResourceList{
"node1": {
{
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
{
v1.ResourceCPU: resource.MustParse("6"),
v1.ResourceMemory: resource.MustParse("6Gi"),
},
},
},
expected: []map[string]v1.ResourceList{
{},
{},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return limit.Cmp(usage)
},
),
},
},
{
name: "randomly positioned over utilized",
usage: map[string]v1.ResourceList{
"node1": {v1.ResourceCPU: resource.MustParse("8")},
"node2": {v1.ResourceCPU: resource.MustParse("2")},
"node3": {v1.ResourceCPU: resource.MustParse("8")},
"node4": {v1.ResourceCPU: resource.MustParse("2")},
"node5": {v1.ResourceCPU: resource.MustParse("8")},
"node6": {v1.ResourceCPU: resource.MustParse("8")},
"node7": {v1.ResourceCPU: resource.MustParse("8")},
"node8": {v1.ResourceCPU: resource.MustParse("2")},
"node9": {v1.ResourceCPU: resource.MustParse("5")},
},
limits: map[string][]v1.ResourceList{
"node1": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node2": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node3": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node4": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node5": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node6": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node7": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node8": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
"node9": {
{v1.ResourceCPU: resource.MustParse("4")},
{v1.ResourceCPU: resource.MustParse("6")},
},
},
expected: []map[string]v1.ResourceList{
{
"node2": {v1.ResourceCPU: resource.MustParse("2")},
"node4": {v1.ResourceCPU: resource.MustParse("2")},
"node8": {v1.ResourceCPU: resource.MustParse("2")},
},
{
"node1": {v1.ResourceCPU: resource.MustParse("8")},
"node3": {v1.ResourceCPU: resource.MustParse("8")},
"node5": {v1.ResourceCPU: resource.MustParse("8")},
"node6": {v1.ResourceCPU: resource.MustParse("8")},
"node7": {v1.ResourceCPU: resource.MustParse("8")},
},
},
classifiers: []Classifier[string, v1.ResourceList]{
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return usage.Cmp(limit)
},
),
ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
func(usage, limit resource.Quantity) int {
return limit.Cmp(usage)
},
),
},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := Classify(tt.usage, tt.limits, tt.classifiers...)
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected result: %v", result)
}
})
}
}

View File

@@ -19,9 +19,9 @@ package nodeutilization
import ( import (
"context" "context"
"fmt" "fmt"
"slices"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
@@ -29,242 +29,164 @@ import (
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types" frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
) )
const HighNodeUtilizationPluginName = "HighNodeUtilization" const HighNodeUtilizationPluginName = "HighNodeUtilization"
// this line makes sure that HighNodeUtilization implements the BalancePlugin // HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its plugin.
// interface. // Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler
// can schedule according to its plugin. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.
type HighNodeUtilization struct { type HighNodeUtilization struct {
handle frameworktypes.Handle handle frameworktypes.Handle
args *HighNodeUtilizationArgs args *HighNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool podFilter func(pod *v1.Pod) bool
criteria []any underutilizationCriteria []interface{}
resourceNames []v1.ResourceName resourceNames []v1.ResourceName
highThresholds api.ResourceThresholds targetThresholds api.ResourceThresholds
usageClient usageClient usageSnapshot usageClient
} }
// NewHighNodeUtilization builds plugin from its arguments while passing a handle. var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
func NewHighNodeUtilization(
genericArgs runtime.Object, handle frameworktypes.Handle, // NewHighNodeUtilization builds plugin from its arguments while passing a handle
) (frameworktypes.Plugin, error) { func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
args, ok := genericArgs.(*HighNodeUtilizationArgs) highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
if !ok { if !ok {
return nil, fmt.Errorf( return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
"want args to be of type HighNodeUtilizationArgs, got %T",
genericArgs,
)
} }
// this plugin worries only about thresholds but the nodeplugins targetThresholds := make(api.ResourceThresholds)
// package was made to take two thresholds into account, one for low setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
// and another for high usage. here we make sure we set the high resourceNames := getResourceNames(targetThresholds)
// threshold to the maximum value for all resources for which we have a
// threshold. underutilizationCriteria := []interface{}{
highThresholds := make(api.ResourceThresholds) "CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
for rname := range args.Thresholds { "Mem", highNodeUtilizatioArgs.Thresholds[v1.ResourceMemory],
highThresholds[rname] = MaxResourcePercentage "Pods", highNodeUtilizatioArgs.Thresholds[v1.ResourcePods],
}
for name := range highNodeUtilizatioArgs.Thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(highNodeUtilizatioArgs.Thresholds[name]))
}
} }
// get the resource names for which we have a threshold. this is podFilter, err := podutil.NewOptions().
// later used when determining if we are going to evict a pod. WithFilter(handle.Evictor().Filter).
resourceThresholds := getResourceNames(args.Thresholds)
// by default we evict pods from the under utilized nodes even if they
// don't define a request for a given threshold. this works most of the
// time and there is a use case for it. When using the restrict mode
// we evaluate if the pod has a request for any of the resources the
// user has provided as threshold.
filters := []podutil.FilterFunc{handle.Evictor().Filter}
if slices.Contains(args.EvictionModes, EvictionModeOnlyThresholdingResources) {
filters = append(
filters,
withResourceRequestForAny(resourceThresholds...),
)
}
podFilter, err := podutil.
NewOptions().
WithFilter(podutil.WrapFilterFuncs(filters...)).
BuildFilterFunc() BuildFilterFunc()
if err != nil { if err != nil {
return nil, fmt.Errorf("error initializing pod filter function: %v", err) return nil, fmt.Errorf("error initializing pod filter function: %v", err)
} }
// resourceNames is a list of all resource names this plugin cares
// about. we care about the resources for which we have a threshold and
// all we consider the basic resources (cpu, memory, pods).
resourceNames := uniquifyResourceNames(
append(
resourceThresholds,
v1.ResourceCPU,
v1.ResourceMemory,
v1.ResourcePods,
),
)
return &HighNodeUtilization{ return &HighNodeUtilization{
handle: handle, handle: handle,
args: args, args: highNodeUtilizatioArgs,
resourceNames: resourceNames, resourceNames: resourceNames,
highThresholds: highThresholds, targetThresholds: targetThresholds,
criteria: thresholdsToKeysAndValues(args.Thresholds), underutilizationCriteria: underutilizationCriteria,
podFilter: podFilter, podFilter: podFilter,
usageClient: newRequestedUsageClient( usageSnapshot: newRequestedUsageSnapshot(resourceNames, handle.GetPodsAssignedToNodeFunc()),
resourceNames,
handle.GetPodsAssignedToNodeFunc(),
),
}, nil }, nil
} }
// Name retrieves the plugin name. // Name retrieves the plugin name
func (h *HighNodeUtilization) Name() string { func (h *HighNodeUtilization) Name() string {
return HighNodeUtilizationPluginName return HighNodeUtilizationPluginName
} }
// Balance holds the main logic of the plugin. It evicts pods from under // Balance extension point implementation for the plugin
// utilized nodes. The goal here is to concentrate pods in fewer nodes so that
// fewer nodes are used.
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status { func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
if err := h.usageClient.sync(ctx, nodes); err != nil { if err := h.usageSnapshot.capture(nodes); err != nil {
return &frameworktypes.Status{ return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err), Err: fmt.Errorf("error getting node usage: %v", err),
} }
} }
// take a picture of the current state of the nodes, everything else sourceNodes, highNodes := classifyNodes(
// here is based on this snapshot. getNodeUsage(nodes, h.usageSnapshot),
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, h.usageClient) getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, false, h.usageSnapshot),
capacities := referencedResourceListForNodesCapacity(nodes) func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
// node usages are not presented as percentages over the capacity.
// we need to normalize them to be able to compare them with the
// thresholds. thresholds are already provided by the user in
// percentage.
usage, thresholds := assessNodesUsagesAndStaticThresholds(
nodesUsageMap, capacities, h.args.Thresholds, h.highThresholds,
)
// classify nodes in two groups: underutilized and schedulable. we will
// later try to move pods from the first group to the second.
nodeGroups := classifier.Classify(
usage, thresholds,
// underutilized nodes.
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
return isNodeBelowThreshold(usage, threshold)
}, },
// schedulable nodes. func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
func(nodeName string, usage, threshold api.ResourceThresholds) bool { if nodeutil.IsNodeUnschedulable(node) {
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) { klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
klog.V(2).InfoS(
"Node is unschedulable",
"node", klog.KObj(nodesMap[nodeName]),
)
return false return false
} }
return true return !isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
}, })
)
// the nodeplugin package works by means of NodeInfo structures. these // log message in one line
// structures hold a series of information about the nodes. now that klog.V(1).InfoS("Criteria for a node below target utilization", h.underutilizationCriteria...)
// we have classified the nodes, we can build the NodeInfo structures klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))
// for each group. NodeInfo structs carry usage and available resources
// for each node.
nodeInfos := make([][]NodeInfo, 2)
category := []string{"underutilized", "overutilized"}
for i := range nodeGroups {
for nodeName := range nodeGroups[i] {
klog.InfoS(
"Node has been classified",
"category", category[i],
"node", klog.KObj(nodesMap[nodeName]),
"usage", nodesUsageMap[nodeName],
"usagePercentage", normalizer.Round(usage[nodeName]),
)
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
NodeUsage: NodeUsage{
node: nodesMap[nodeName],
usage: nodesUsageMap[nodeName],
allPods: podListMap[nodeName],
},
available: capNodeCapacitiesToThreshold(
nodesMap[nodeName],
thresholds[nodeName][1],
h.resourceNames,
),
})
}
}
lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1] if len(sourceNodes) == 0 {
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
klog.V(1).InfoS("Criteria for a node below target utilization", h.criteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
if len(lowNodes) == 0 {
klog.V(1).InfoS(
"No node is underutilized, nothing to do here, you might tune your thresholds further",
)
return nil return nil
} }
if len(sourceNodes) <= h.args.NumberOfNodes {
if len(lowNodes) <= h.args.NumberOfNodes { klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", h.args.NumberOfNodes)
klog.V(1).InfoS(
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
"underutilizedNodes", len(lowNodes),
"numberOfNodes", h.args.NumberOfNodes,
)
return nil return nil
} }
if len(sourceNodes) == len(nodes) {
if len(lowNodes) == len(nodes) {
klog.V(1).InfoS("All nodes are underutilized, nothing to do here") klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
return nil return nil
} }
if len(highNodes) == 0 {
if len(schedulableNodes) == 0 {
klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here") klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
return nil return nil
} }
// stops the eviction process if the total available capacity usage has // stop if the total available usage has dropped to zero - no more pods can be scheduled
// dropped to zero - no more pods can be scheduled. this will signal continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
// to stop if any of the available resources has dropped to zero. for name := range totalAvailableUsage {
continueEvictionCond := func(_ NodeInfo, avail api.ReferencedResourceList) bool { if totalAvailableUsage[name].CmpInt64(0) < 1 {
for name := range avail {
if avail[name].CmpInt64(0) < 1 {
return false return false
} }
} }
return true return true
} }
// sorts the nodes by the usage in ascending order. // Sort the nodes by the usage in ascending order
sortNodesByUsage(lowNodes, true) sortNodesByUsage(sourceNodes, true)
evictPodsFromSourceNodes( evictPodsFromSourceNodes(
ctx, ctx,
h.args.EvictableNamespaces, h.args.EvictableNamespaces,
lowNodes, sourceNodes,
schedulableNodes, highNodes,
h.handle.Evictor(), h.handle.Evictor(),
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName}, evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
h.podFilter, h.podFilter,
h.resourceNames, h.resourceNames,
continueEvictionCond, continueEvictionCond,
h.usageClient, h.usageSnapshot,
nil,
) )
return nil return nil
} }
func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds) {
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
thresholds[v1.ResourcePods] = MaxResourcePercentage
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
}
// Default targetThreshold resource values to 100
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
targetThresholds[name] = MaxResourcePercentage
}
}
}

View File

@@ -48,7 +48,6 @@ func TestHighNodeUtilization(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
thresholds api.ResourceThresholds thresholds api.ResourceThresholds
evictionModes []EvictionMode
nodes []*v1.Node nodes []*v1.Node
pods []*v1.Pod pods []*v1.Pod
expectedPodsEvicted uint expectedPodsEvicted uint
@@ -245,7 +244,7 @@ func TestHighNodeUtilization(t *testing.T) {
}, },
// All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value) // All pods are assumed to be burstable (test.BuildTestNode always sets both cpu/memory resource requests to some value)
pods: []*v1.Pod{ pods: []*v1.Pod{
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) { test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod) test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod) test.MakeBestEffortPod(pod)
}), }),
@@ -434,53 +433,6 @@ func TestHighNodeUtilization(t *testing.T) {
}, },
expectedPodsEvicted: 0, expectedPodsEvicted: 0,
}, },
{
name: "with extended resource threshold and no extended resource pods",
thresholds: api.ResourceThresholds{
extendedResource: 40,
},
evictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 10)
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 10)
}),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 10)
}),
},
pods: []*v1.Pod{
// pods on node1 have the extended resource
// request set and they put the node in the
// over utilization range.
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
}),
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
}),
// pods in the other nodes must not be evicted
// because they do not have the extended
// resource defined in their requests.
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
},
expectedPodsEvicted: 0,
},
} }
for _, testCase := range testCases { for _, testCase := range testCases {
@@ -522,13 +474,10 @@ func TestHighNodeUtilization(t *testing.T) {
}) })
} }
plugin, err := NewHighNodeUtilization( plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
&HighNodeUtilizationArgs{ Thresholds: testCase.thresholds,
Thresholds: testCase.thresholds, },
EvictionModes: testCase.evictionModes, handle)
},
handle,
)
if err != nil { if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err) t.Fatalf("Unable to initialize the plugin: %v", err)
} }

View File

@@ -21,6 +21,7 @@ import (
"fmt" "fmt"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@@ -28,241 +29,128 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types" frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
) )
const LowNodeUtilizationPluginName = "LowNodeUtilization" const LowNodeUtilizationPluginName = "LowNodeUtilization"
// this line makes sure that LowNodeUtilization implements the BalancePlugin // LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
// interface. // to calculate nodes' utilization and not the actual resource usage.
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
// LowNodeUtilization evicts pods from overutilized nodes to underutilized
// nodes. Note that CPU/Memory requests are used to calculate nodes'
// utilization and not the actual resource usage.
type LowNodeUtilization struct { type LowNodeUtilization struct {
handle frameworktypes.Handle handle frameworktypes.Handle
args *LowNodeUtilizationArgs args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool podFilter func(pod *v1.Pod) bool
underCriteria []any underutilizationCriteria []interface{}
overCriteria []any overutilizationCriteria []interface{}
resourceNames []v1.ResourceName resourceNames []v1.ResourceName
extendedResourceNames []v1.ResourceName usageSnapshot usageClient
usageClient usageClient
} }
// NewLowNodeUtilization builds plugin from its arguments while passing a var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
// handle. this plugin aims to move workload from overutilized nodes to
// underutilized nodes. // NewLowNodeUtilization builds plugin from its arguments while passing a handle
func NewLowNodeUtilization( func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
genericArgs runtime.Object, handle frameworktypes.Handle, lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
) (frameworktypes.Plugin, error) {
args, ok := genericArgs.(*LowNodeUtilizationArgs)
if !ok { if !ok {
return nil, fmt.Errorf( return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
"want args to be of type LowNodeUtilizationArgs, got %T",
genericArgs,
)
} }
// resourceNames holds a list of resources for which the user has setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
// provided thresholds. extendedResourceNames holds those as well
// as cpu, memory and pods if no prometheus collection is used.
resourceNames := getResourceNames(args.Thresholds)
extendedResourceNames := resourceNames
// if we are using prometheus we need to validate we have everything we underutilizationCriteria := []interface{}{
// need. if we aren't then we need to make sure we are also collecting "CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
// data for cpu, memory and pods. "Mem", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceMemory],
metrics := args.MetricsUtilization "Pods", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourcePods],
if metrics != nil && metrics.Source == api.PrometheusMetrics { }
if err := validatePrometheusMetricsUtilization(args); err != nil { for name := range lowNodeUtilizationArgsArgs.Thresholds {
return nil, err if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.Thresholds[name]))
} }
} else {
extendedResourceNames = uniquifyResourceNames(
append(
resourceNames,
v1.ResourceCPU,
v1.ResourceMemory,
v1.ResourcePods,
),
)
} }
podFilter, err := podutil. overutilizationCriteria := []interface{}{
NewOptions(). "CPU", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.TargetThresholds {
if !nodeutil.IsBasicResource(name) {
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.TargetThresholds[name]))
}
}
podFilter, err := podutil.NewOptions().
WithFilter(handle.Evictor().Filter). WithFilter(handle.Evictor().Filter).
BuildFilterFunc() BuildFilterFunc()
if err != nil { if err != nil {
return nil, fmt.Errorf("error initializing pod filter function: %v", err) return nil, fmt.Errorf("error initializing pod filter function: %v", err)
} }
// this plugins supports different ways of collecting usage data. each resourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)
// different way provides its own "usageClient". here we make sure we
// have the correct one or an error is triggered. XXX MetricsServer is var usageSnapshot usageClient
// deprecated, removed once dropped. if lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer {
var usageClient usageClient = newRequestedUsageClient( usageSnapshot = newActualUsageSnapshot(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
extendedResourceNames, handle.GetPodsAssignedToNodeFunc(), } else {
) usageSnapshot = newRequestedUsageSnapshot(resourceNames, handle.GetPodsAssignedToNodeFunc())
if metrics != nil {
usageClient, err = usageClientForMetrics(args, handle, extendedResourceNames)
if err != nil {
return nil, err
}
} }
return &LowNodeUtilization{ return &LowNodeUtilization{
handle: handle, handle: handle,
args: args, args: lowNodeUtilizationArgsArgs,
underCriteria: thresholdsToKeysAndValues(args.Thresholds), underutilizationCriteria: underutilizationCriteria,
overCriteria: thresholdsToKeysAndValues(args.TargetThresholds), overutilizationCriteria: overutilizationCriteria,
resourceNames: resourceNames, resourceNames: resourceNames,
extendedResourceNames: extendedResourceNames, podFilter: podFilter,
podFilter: podFilter, usageSnapshot: usageSnapshot,
usageClient: usageClient,
}, nil }, nil
} }
// Name retrieves the plugin name. // Name retrieves the plugin name
func (l *LowNodeUtilization) Name() string { func (l *LowNodeUtilization) Name() string {
return LowNodeUtilizationPluginName return LowNodeUtilizationPluginName
} }
// Balance holds the main logic of the plugin. It evicts pods from over // Balance extension point implementation for the plugin
// utilized nodes to under utilized nodes. The goal here is to evenly
// distribute pods across nodes.
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status { func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
if err := l.usageClient.sync(ctx, nodes); err != nil { if err := l.usageSnapshot.capture(nodes); err != nil {
return &frameworktypes.Status{ return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err), Err: fmt.Errorf("error getting node usage: %v", err),
} }
} }
// starts by taking a snapshot of the nodes usage. we will use this lowNodes, sourceNodes := classifyNodes(
// snapshot to assess the nodes usage and classify them as getNodeUsage(nodes, l.usageSnapshot),
// underutilized or overutilized. getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.args.UseDeviationThresholds, l.usageSnapshot),
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, l.usageClient) // The node has to be schedulable (to be able to move workload there)
capacities := referencedResourceListForNodesCapacity(nodes) func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
// usage, by default, is exposed in absolute values. we need to normalize klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
// them (convert them to percentages) to be able to compare them with the
// user provided thresholds. thresholds are already provided in percentage
// in the <0; 100> interval.
var usage map[string]api.ResourceThresholds
var thresholds map[string][]api.ResourceThresholds
if l.args.UseDeviationThresholds {
// here the thresholds provided by the user represent
// deviations from the average so we need to treat them
// differently. when calculating the average we only
// need to consider the resources for which the user
// has provided thresholds.
usage, thresholds = assessNodesUsagesAndRelativeThresholds(
filterResourceNames(nodesUsageMap, l.resourceNames),
capacities,
l.args.Thresholds,
l.args.TargetThresholds,
)
} else {
usage, thresholds = assessNodesUsagesAndStaticThresholds(
nodesUsageMap,
capacities,
l.args.Thresholds,
l.args.TargetThresholds,
)
}
// classify nodes in under and over utilized. we will later try to move
// pods from the overutilized nodes to the underutilized ones.
nodeGroups := classifier.Classify(
usage, thresholds,
// underutilization criteria processing. nodes that are
// underutilized but aren't schedulable are ignored.
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
klog.V(2).InfoS(
"Node is unschedulable, thus not considered as underutilized",
"node", klog.KObj(nodesMap[nodeName]),
)
return false return false
} }
return isNodeBelowThreshold(usage, threshold) return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
}, },
// overutilization criteria evaluation. func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
func(nodeName string, usage, threshold api.ResourceThresholds) bool { return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
return isNodeAboveThreshold(usage, threshold)
}, },
) )
// the nodeutilization package was designed to work with NodeInfo // log message for nodes with low utilization
// structs. these structs hold information about how utilized a node klog.V(1).InfoS("Criteria for a node under utilization", l.underutilizationCriteria...)
// is. we need to go through the result of the classification and turn
// it into NodeInfo structs.
nodeInfos := make([][]NodeInfo, 2)
categories := []string{"underutilized", "overutilized"}
classifiedNodes := map[string]bool{}
for i := range nodeGroups {
for nodeName := range nodeGroups[i] {
classifiedNodes[nodeName] = true
klog.InfoS(
"Node has been classified",
"category", categories[i],
"node", klog.KObj(nodesMap[nodeName]),
"usage", nodesUsageMap[nodeName],
"usagePercentage", normalizer.Round(usage[nodeName]),
)
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
NodeUsage: NodeUsage{
node: nodesMap[nodeName],
usage: nodesUsageMap[nodeName],
allPods: podListMap[nodeName],
},
available: capNodeCapacitiesToThreshold(
nodesMap[nodeName],
thresholds[nodeName][1],
l.extendedResourceNames,
),
})
}
}
// log nodes that are appropriately utilized.
for nodeName := range nodesMap {
if !classifiedNodes[nodeName] {
klog.InfoS(
"Node is appropriately utilized",
"node", klog.KObj(nodesMap[nodeName]),
"usage", nodesUsageMap[nodeName],
"usagePercentage", normalizer.Round(usage[nodeName]),
)
}
}
lowNodes, highNodes := nodeInfos[0], nodeInfos[1]
// log messages for nodes with low and high utilization
klog.V(1).InfoS("Criteria for a node under utilization", l.underCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes)) klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
klog.V(1).InfoS("Criteria for a node above target utilization", l.overCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(highNodes)) // log message for over utilized nodes
klog.V(1).InfoS("Criteria for a node above target utilization", l.overutilizationCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))
if len(lowNodes) == 0 { if len(lowNodes) == 0 {
klog.V(1).InfoS( klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
"No node is underutilized, nothing to do here, you might tune your thresholds further",
)
return nil return nil
} }
if len(lowNodes) <= l.args.NumberOfNodes { if len(lowNodes) <= l.args.NumberOfNodes {
klog.V(1).InfoS( klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
"underutilizedNodes", len(lowNodes),
"numberOfNodes", l.args.NumberOfNodes,
)
return nil return nil
} }
@@ -271,15 +159,14 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
return nil
}
if len(highNodes) == 0 {
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
return nil
}
// this is a stop condition for the eviction process. we stop as soon
// as the node usage drops below the threshold.
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.available) {
return false
}
for name := range totalAvailableUsage {
@@ -291,90 +178,52 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
return true
}
// sort the nodes by the usage in descending order
sortNodesByUsage(highNodes, false)
var nodeLimit *uint
if l.args.EvictionLimits != nil {
nodeLimit = l.args.EvictionLimits.Node
}
evictPodsFromSourceNodes(
ctx,
l.args.EvictableNamespaces,
highNodes,
lowNodes,
l.handle.Evictor(),
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
l.podFilter,
l.extendedResourceNames,
continueEvictionCond,
l.usageClient,
nodeLimit,
)
return nil
}
// validatePrometheusMetricsUtilization validates the Prometheus metrics func setDefaultForLNUThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) {
// utilization. XXX this should be done way earlier than this. // check if Pods/CPU/Mem are set, if not, set them to 100
func validatePrometheusMetricsUtilization(args *LowNodeUtilizationArgs) error { if _, ok := thresholds[v1.ResourcePods]; !ok {
if args.MetricsUtilization.Prometheus == nil { if useDeviationThresholds {
return fmt.Errorf("prometheus property is missing") thresholds[v1.ResourcePods] = MinResourcePercentage
} targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
if args.MetricsUtilization.Prometheus.Query == "" { thresholds[v1.ResourcePods] = MaxResourcePercentage
return fmt.Errorf("prometheus query is missing") targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
uResourceNames := getResourceNames(args.Thresholds)
oResourceNames := getResourceNames(args.TargetThresholds)
if len(uResourceNames) != 1 || uResourceNames[0] != MetricResource {
return fmt.Errorf(
"thresholds are expected to specify a single instance of %q resource, got %v instead",
MetricResource, uResourceNames,
)
}
if len(oResourceNames) != 1 || oResourceNames[0] != MetricResource {
return fmt.Errorf(
"targetThresholds are expected to specify a single instance of %q resource, got %v instead",
MetricResource, oResourceNames,
)
}
return nil
}
// usageClientForMetrics returns the correct usage client based on the
// metrics source. XXX MetricsServer is deprecated, removed once dropped.
func usageClientForMetrics(
args *LowNodeUtilizationArgs, handle frameworktypes.Handle, resources []v1.ResourceName,
) (usageClient, error) {
metrics := args.MetricsUtilization
switch {
case metrics.MetricsServer, metrics.Source == api.KubernetesMetrics:
if handle.MetricsCollector() == nil {
return nil, fmt.Errorf("metrics client not initialized")
} }
return newActualUsageClient( }
resources, if _, ok := thresholds[v1.ResourceCPU]; !ok {
handle.GetPodsAssignedToNodeFunc(), if useDeviationThresholds {
handle.MetricsCollector(), thresholds[v1.ResourceCPU] = MinResourcePercentage
), nil targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
case metrics.Source == api.PrometheusMetrics: thresholds[v1.ResourceCPU] = MaxResourcePercentage
if handle.PrometheusClient() == nil { targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
return nil, fmt.Errorf("prometheus client not initialized") }
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
} }
return newPrometheusUsageClient(
handle.GetPodsAssignedToNodeFunc(),
handle.PrometheusClient(),
metrics.Prometheus.Query,
), nil
case metrics.Source != "":
return nil, fmt.Errorf("unrecognized metrics source")
default:
return nil, fmt.Errorf("metrics source is empty")
} }
} }


@@ -18,30 +18,35 @@ package nodeutilization
import ( import (
"context" "context"
"crypto/tls"
"fmt" "fmt"
"net"
"net/http"
"testing" "testing"
"time"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1" policy "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/metrics/pkg/apis/metrics/v1beta1" "k8s.io/metrics/pkg/apis/metrics/v1beta1"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake" fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions" "sigs.k8s.io/descheduler/pkg/descheduler/evictions"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector" "sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktesting "sigs.k8s.io/descheduler/pkg/framework/testing"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
promapi "github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
) )
@@ -66,7 +71,6 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsWithMetricsEvicted uint expectedPodsWithMetricsEvicted uint
evictedPods []string evictedPods []string
evictableNamespaces *api.Namespaces evictableNamespaces *api.Namespaces
evictionLimits *api.EvictionLimits
}{ }{
{ {
name: "no evictable pods", name: "no evictable pods",
@@ -716,60 +720,6 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsEvicted: 0, expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0, expectedPodsWithMetricsEvicted: 0,
}, },
{
name: "with extended resource in some of nodes with deviation",
thresholds: api.ResourceThresholds{
v1.ResourcePods: 5,
extendedResource: 10,
},
targetThresholds: api.ResourceThresholds{
v1.ResourcePods: 5,
extendedResource: 10,
},
useDeviationThresholds: true,
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with extended resource.
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p2", 0, 0, n2NodeName, func(pod *v1.Pod) {
// A pod with extended resource.
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
}),
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 1,
expectedPodsWithMetricsEvicted: 0,
},
{ {
name: "without priorities, but only other node is unschedulable", name: "without priorities, but only other node is unschedulable",
thresholds: api.ResourceThresholds{ thresholds: api.ResourceThresholds{
@@ -1076,79 +1026,6 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsWithMetricsEvicted: 2, expectedPodsWithMetricsEvicted: 2,
evictedPods: []string{}, evictedPods: []string{},
}, },
{
name: "deviation thresholds and overevicting memory",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 5,
v1.ResourcePods: 5,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 5,
v1.ResourcePods: 5,
},
useDeviationThresholds: true,
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
// totalcpuusage = 3600m, avgcpuusage = 3600/12000 = 0.3 => 30%
// totalpodsusage = 9, avgpodsusage = 9/30 = 0.3 => 30%
// n1 and n2 are fully memory utilized
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 375, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 375, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 375, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 4000, 3000),
test.BuildNodeMetrics(n2NodeName, 4000, 3000),
test.BuildNodeMetrics(n3NodeName, 4000, 3000),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 400, 375),
test.BuildPodMetrics("p2", 400, 375),
test.BuildPodMetrics("p3", 400, 375),
test.BuildPodMetrics("p4", 400, 375),
test.BuildPodMetrics("p5", 400, 375),
test.BuildPodMetrics("p6", 400, 375),
test.BuildPodMetrics("p7", 400, 375),
test.BuildPodMetrics("p8", 400, 375),
test.BuildPodMetrics("p9", 400, 3000),
},
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
evictedPods: []string{},
},
{ {
name: "without priorities different evictions for requested and actual resources", name: "without priorities different evictions for requested and actual resources",
thresholds: api.ResourceThresholds{ thresholds: api.ResourceThresholds{
@@ -1253,72 +1130,6 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsEvicted: 3, expectedPodsEvicted: 3,
expectedPodsWithMetricsEvicted: 2, expectedPodsWithMetricsEvicted: 2,
}, },
{
name: "without priorities with node eviction limit",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
evictionLimits: &api.EvictionLimits{
Node: ptr.To[uint](2),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 2,
expectedPodsWithMetricsEvicted: 2,
},
} }
for _, tc := range testCases { for _, tc := range testCases {
@@ -1334,29 +1145,20 @@ func TestLowNodeUtilization(t *testing.T) {
for _, pod := range tc.pods { for _, pod := range tc.pods {
objs = append(objs, pod) objs = append(objs, pod)
} }
var metricsObjs []runtime.Object
for _, nodemetrics := range tc.nodemetricses {
metricsObjs = append(metricsObjs, nodemetrics)
}
for _, podmetrics := range tc.podmetricses {
metricsObjs = append(metricsObjs, podmetrics)
}
fakeClient := fake.NewSimpleClientset(objs...) fakeClient := fake.NewSimpleClientset(objs...)
metricsClientset := fakemetricsclient.NewSimpleClientset(metricsObjs...)
var collector *metricscollector.MetricsCollector collector := metricscollector.NewMetricsCollector(fakeClient, metricsClientset)
if metricsEnabled { err := collector.Collect(ctx)
metricsClientset := fakemetricsclient.NewSimpleClientset() if err != nil {
for _, nodemetrics := range tc.nodemetricses { t.Fatalf("unable to collect metrics: %v", err)
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
}
for _, podmetrics := range tc.podmetricses {
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
}
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
collector = metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
err := collector.Collect(ctx)
if err != nil {
t.Fatalf("unable to collect metrics: %v", err)
}
} }
podsForEviction := make(map[string]struct{}) podsForEviction := make(map[string]struct{})
@@ -1386,18 +1188,14 @@ func TestLowNodeUtilization(t *testing.T) {
} }
handle.MetricsCollectorImpl = collector handle.MetricsCollectorImpl = collector
var metricsUtilization *MetricsUtilization
if metricsEnabled {
metricsUtilization = &MetricsUtilization{Source: api.KubernetesMetrics}
}
plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{ plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
Thresholds: tc.thresholds, Thresholds: tc.thresholds,
TargetThresholds: tc.targetThresholds, TargetThresholds: tc.targetThresholds,
UseDeviationThresholds: tc.useDeviationThresholds, UseDeviationThresholds: tc.useDeviationThresholds,
EvictionLimits: tc.evictionLimits,
EvictableNamespaces: tc.evictableNamespaces, EvictableNamespaces: tc.evictableNamespaces,
MetricsUtilization: metricsUtilization, MetricsUtilization: MetricsUtilization{
MetricsServer: metricsEnabled,
},
}, },
handle) handle)
if err != nil { if err != nil {
@@ -1572,277 +1370,61 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
} }
} }
func withLocalStorage(pod *v1.Pod) { func TestLowNodeUtilizationWithMetrics(t *testing.T) {
// A pod with local storage. return
test.SetNormalOwnerRef(pod) roundTripper := &http.Transport{
pod.Spec.Volumes = []v1.Volume{ Proxy: http.ProxyFromEnvironment,
{ DialContext: (&net.Dialer{
Name: "sample", Timeout: 30 * time.Second,
VolumeSource: v1.VolumeSource{ KeepAlive: 30 * time.Second,
HostPath: &v1.HostPathVolumeSource{Path: "somePath"}, }).DialContext,
EmptyDir: &v1.EmptyDirVolumeSource{ TLSHandshakeTimeout: 10 * time.Second,
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI), TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}
func withCriticalPod(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}
func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
n1NodeName := "n1"
n2NodeName := "n2"
n3NodeName := "n3"
testCases := []struct {
name string
samples model.Vector
nodes []*v1.Node
pods []*v1.Pod
expectedPodsEvicted uint
evictedPods []string
args *LowNodeUtilizationArgs
}{
{
name: "with instance:node_cpu:rate:sum query",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
MetricResource: 30,
},
TargetThresholds: api.ResourceThresholds{
MetricResource: 50,
},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 1,
},
{
name: "with instance:node_cpu:rate:sum query with more evictions",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
MetricResource: 30,
},
TargetThresholds: api.ResourceThresholds{
MetricResource: 50,
},
EvictionLimits: &api.EvictionLimits{
Node: ptr.To[uint](3),
},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 3,
},
{
name: "with instance:node_cpu:rate:sum query with deviation",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
MetricResource: 5,
},
TargetThresholds: api.ResourceThresholds{
MetricResource: 5,
},
EvictionLimits: &api.EvictionLimits{
Node: ptr.To[uint](2),
},
UseDeviationThresholds: true,
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 2,
},
{
name: "with instance:node_cpu:rate:sum query and deviation thresholds",
args: &LowNodeUtilizationArgs{
UseDeviationThresholds: true,
Thresholds: api.ResourceThresholds{MetricResource: 10},
TargetThresholds: api.ResourceThresholds{MetricResource: 10},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 1),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.5),
sample("instance:node_cpu:rate:sum", n3NodeName, 0),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 1,
},
} }
for _, tc := range testCases { AuthToken := "eyJhbGciOiJSUzI1NiIsImtpZCI6IkNoTW9tT2w2cWtzR2V0dURZdjBqdnBSdmdWM29lWmc3dWpfNW0yaDc2NHMifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTcyODk5MjY3NywiaWF0IjoxNzI4OTg5MDc3LCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMiLCJqdGkiOiJkNDY3ZjVmMy0xNGVmLTRkMjItOWJkNC1jMGM1Mzk3NzYyZDgiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6Im9wZW5zaGlmdC1tb25pdG9yaW5nIiwic2VydmljZWFjY291bnQiOnsibmFtZSI6InByb21ldGhldXMtazhzIiwidWlkIjoiNjY4NDllMGItYTAwZC00NjUzLWE5NTItNThiYTE1MTk4NTlkIn19LCJuYmYiOjE3Mjg5ODkwNzcsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpvcGVuc2hpZnQtbW9uaXRvcmluZzpwcm9tZXRoZXVzLWs4cyJ9.J1i6-oRAC9J8mqrlZPKGA-CU5PbUzhm2QxAWFnu65-NXR3e252mesybwtjkwxUtTLKrsYHQXwEsG5rGcQsvMcGK9RC9y5z33DFj8tPPwOGLJYJ-s5cTImTqKtWRXzTlcrsrUYTYApfrOsEyXwyfDow4PCslZjR3cd5FMRbvXNqHLg26nG_smApR4wc6kXy7xxlRuGhxu-dUiscQP56njboOK61JdTG8F3FgOayZnKk1jGeVdIhXClqGWJyokk-ZM3mMK1MxzGXY0tLbe37V4B7g3NDiH651BUcicfDSky46yfcAYxMDbZgpK2TByWApAllN0wixz2WsFfyBVu_Q5xtZ9Gi9BUHSa5ioRiBK346W4Bdmr9ala5ldIXDa59YE7UB34DsCHyqvzRx_Sj76hLzy2jSOk7RsL0fM8sDoJL4ROdi-3Jtr5uPY593I8H8qeQvFS6PQfm0bUZqVKrrLoCK_uk9guH4a6K27SlD-Utk3dpsjbmrwcjBxm-zd_LE9YyQ734My00Pcy9D5eNio3gESjGsHqGFc_haq4ZCiVOCkbdmABjpPEL6K7bs1GMZbHt1CONL0-LzymM8vgGNj0grjpG8-5AF8ZuSqR7pbZSV_NO2nKkmrwpILCw0Joqp6V3C9pP9nXWHIDyVMxMK870zxzt_qCoPRLCAujQQn6e0U"
testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) { client, err := promapi.NewClient(promapi.Config{
return func(t *testing.T) { Address: "https://prometheus-k8s-openshift-monitoring.apps.jchaloup-20241015-3.group-b.devcluster.openshift.com",
ctx, cancel := context.WithCancel(context.Background()) RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(AuthToken), roundTripper),
defer cancel() })
if err != nil {
var objs []runtime.Object t.Fatalf("prom client error: %v", err)
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
podsForEviction := make(map[string]struct{})
for _, pod := range tc.evictedPods {
podsForEviction[pod] = struct{}{}
}
evictionFailed := false
if len(tc.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*policy.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
evictionFailed = true
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
}
return true, obj, nil
})
}
handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}
handle.PrometheusClientImpl = &fakePromClient{
result: tc.samples,
dataType: model.ValVector,
}
plugin, err := NewLowNodeUtilization(tc.args, handle)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}
status := plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
if status != nil {
t.Fatalf("Balance.err: %v", status.Err)
}
podsEvicted := podEvictor.TotalEvicted()
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
}
}
}
t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
} }
// pod:container_cpu_usage:sum
// container_memory_usage_bytes
v1api := promv1.NewAPI(client)
ctx := context.TODO()
// promQuery := "avg_over_time(kube_pod_container_resource_requests[1m])"
promQuery := "kube_pod_container_resource_requests"
results, warnings, err := v1api.Query(ctx, promQuery, time.Now())
fmt.Printf("results: %#v\n", results)
for _, sample := range results.(model.Vector) {
fmt.Printf("sample: %#v\n", sample)
}
fmt.Printf("warnings: %v\n", warnings)
fmt.Printf("err: %v\n", err)
result := model.Value(
&model.Vector{
&model.Sample{
Metric: model.Metric{
"container": "kube-controller-manager",
"endpoint": "https-main",
"job": "kube-state-metrics",
"namespace": "openshift-kube-controller-manager",
"node": "ip-10-0-72-168.us-east-2.compute.internal",
"pod": "kube-controller-manager-ip-10-0-72-168.us-east-2.compute.internal",
"resource": "cpu",
"service": "kube-state-metrics",
"uid": "ae46c09f-ade7-4133-9ee8-cf45ac78ca6d",
"unit": "core",
},
Value: 0.06,
Timestamp: 1728991761711,
},
},
)
fmt.Printf("result: %#v\n", result)
} }

File diff suppressed because it is too large Load Diff

View File

@@ -18,20 +18,18 @@ package nodeutilization
import ( import (
"math" "math"
"reflect"
"testing" "testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
) )
func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo { var (
nodeInfo := &NodeInfo{ lowPriority = int32(0)
highPriority = int32(10000)
extendedResource = v1.ResourceName("example.com/foo")
testNode1 = NodeInfo{
NodeUsage: NodeUsage{ NodeUsage: NodeUsage{
node: &v1.Node{ node: &v1.Node{
Status: v1.NodeStatus{ Status: v1.NodeStatus{
@@ -46,534 +44,123 @@ func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
}, },
}, },
ObjectMeta: metav1.ObjectMeta{Name: name}, ObjectMeta: metav1.ObjectMeta{Name: "node1"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
},
},
}
testNode2 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
},
},
}
testNode3 = NodeInfo{
NodeUsage: NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
ObjectMeta: metav1.ObjectMeta{Name: "node3"},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
}, },
}, },
} }
apply(nodeInfo)
return nodeInfo
}
var (
lowPriority = int32(0)
highPriority = int32(10000)
extendedResource = v1.ResourceName("example.com/foo")
) )
func TestSortNodesByUsage(t *testing.T) { func TestResourceUsagePercentages(t *testing.T) {
tests := []struct { resourceUsagePercentage := resourceUsagePercentages(NodeUsage{
name string node: &v1.Node{
nodeInfoList []NodeInfo Status: v1.NodeStatus{
expectedNodeInfoNames []string Capacity: v1.ResourceList{
}{ v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
{ v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
name: "cpu memory pods", v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
nodeInfoList: []NodeInfo{ },
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) { Allocatable: v1.ResourceList{
nodeInfo.usage = api.ReferencedResourceList{ v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI), v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI), },
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
}
}),
}, },
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
}, },
{ usage: map[v1.ResourceName]*resource.Quantity{
name: "memory", v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
nodeInfoList: []NodeInfo{ v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) { v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
}
}),
},
expectedNodeInfoNames: []string{"node3", "node1", "node2"},
}, },
})
expectedUsageInIntPercentage := map[v1.ResourceName]float64{
v1.ResourceCPU: 63,
v1.ResourceMemory: 90,
v1.ResourcePods: 37,
} }
for _, tc := range tests { for resourceName, percentage := range expectedUsageInIntPercentage {
t.Run(tc.name+" descending", func(t *testing.T) { if math.Floor(resourceUsagePercentage[resourceName]) != percentage {
sortNodesByUsage(tc.nodeInfoList, false) // ascending=false, sort nodes in descending order t.Errorf("Incorrect percentange computation, expected %v, got math.Floor(%v) instead", percentage, resourceUsagePercentage[resourceName])
for i := 0; i < len(tc.nodeInfoList); i++ {
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[i] {
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[i], tc.nodeInfoList[i].NodeUsage.node.Name)
}
}
})
t.Run(tc.name+" ascending", func(t *testing.T) {
sortNodesByUsage(tc.nodeInfoList, true) // ascending=true, sort nodes in ascending order
size := len(tc.nodeInfoList)
for i := 0; i < size; i++ {
if tc.nodeInfoList[i].NodeUsage.node.Name != tc.expectedNodeInfoNames[size-i-1] {
t.Errorf("Expected %v, got %v", tc.expectedNodeInfoNames[size-i-1], tc.nodeInfoList[i].NodeUsage.node.Name)
}
}
})
}
}
func TestResourceUsageToResourceThreshold(t *testing.T) {
for _, tt := range []struct {
name string
usage api.ReferencedResourceList
capacity api.ReferencedResourceList
expected api.ResourceThresholds
}{
{
name: "10 percent",
usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
expected: api.ResourceThresholds{v1.ResourceCPU: 10},
},
{
name: "zeroed out capacity",
usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
},
expected: api.ResourceThresholds{v1.ResourceCPU: 0},
},
{
name: "non existing usage",
usage: api.ReferencedResourceList{
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: resource.NewMilliQuantity(100, resource.DecimalSI),
},
expected: api.ResourceThresholds{},
},
{
name: "existing and non existing usage",
usage: api.ReferencedResourceList{
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceCPU: resource.NewMilliQuantity(200, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
v1.ResourceMemory: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
expected: api.ResourceThresholds{v1.ResourceCPU: 20},
},
{
name: "nil usage",
usage: api.ReferencedResourceList{
v1.ResourceCPU: nil,
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
expected: api.ResourceThresholds{},
},
{
name: "nil capacity",
usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: nil,
},
expected: api.ResourceThresholds{},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := ResourceUsageToResourceThreshold(tt.usage, tt.capacity)
if !reflect.DeepEqual(result, tt.expected) {
t.Errorf("Expected %v, got %v", tt.expected, result)
}
})
}
}
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
result := api.ResourceThresholds{}
for rname, value := range usages {
total, ok := totals[rname]
if !ok {
continue
} }
}
used, avail := value.Value(), total.Value() t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
if rname == v1.ResourceCPU { }
used, avail = value.MilliValue(), total.MilliValue()
func TestSortNodesByUsageDescendingOrder(t *testing.T) {
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
for i := 0; i < len(expectedNodeList); i++ {
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
} }
pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
result[rname] = api.Percentage(pct)
}
return result
}
// This is a test for thresholds being defined as deviations from the average
// usage. This is expected to be a little longer test case. We are going to
// comment the steps to make it easier to follow.
func TestClassificationUsingDeviationThresholds(t *testing.T) {
// These are the two thresholds defined by the user. These thresholds
// mean that our low limit will be 5 pct points below the average and
// the high limit will be 5 pct points above the average.
userDefinedThresholds := map[string]api.ResourceThresholds{
"low": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
}
// Create a fake total amount of resources for all nodes. We define
// the total amount to 1000 for both memory and cpu. This is so we
// can easily calculate (manually) the percentage of usages here.
nodesTotal := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000"),
v1.ResourceMemory: resource.MustParse("1000"),
},
)
// Create a fake usage per server per resource. We are aiming to
// have the average of these resources in 50%. When applying the
// thresholds we should obtain the low threshold at 45% and the high
// threshold at 55%.
nodesUsage := map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("480"),
v1.ResourceMemory: resource.MustParse("480"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("520"),
v1.ResourceMemory: resource.MustParse("520"),
},
"node4": {
v1.ResourceCPU: resource.MustParse("500"),
v1.ResourceMemory: resource.MustParse("500"),
},
"node5": {
v1.ResourceCPU: resource.MustParse("900"),
v1.ResourceMemory: resource.MustParse("900"),
},
}
// Normalize the usage to percentages and then calculate the average
// among all nodes.
usage := normalizer.Normalize(nodesUsage, nodesTotal, ResourceListUsageNormalizer)
average := normalizer.Average(usage)
// Create the thresholds by first applying the deviations and then
// replicating once for each node. Thresholds are supposed to be per
// node even though the user provides them only once. This is by
// design as it opens the possibility for further implementations of
// thresholds per node.
thresholds := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
[]api.ResourceThresholds{
normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
normalizer.Sum(average, userDefinedThresholds["high"]),
},
)
// Classify the nodes according to the thresholds. Nodes below the low
// threshold (45%) are underutilized, nodes above the high threshold
// (55%) are overutilized and nodes in between are properly utilized.
result := classifier.Classify(
usage, thresholds,
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
)
// we expect node1 to be underutilized (10%), node2, node3 and node4
// to be properly utilized (48%, 52% and 50% respectively) and node5 to
// be overutilized (90%).
expected := []map[string]api.ResourceThresholds{
{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
}
if !reflect.DeepEqual(result, expected) {
t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
} }
} }
// This is almost a copy of TestUsingDeviationThresholds but we are using func TestSortNodesByUsageAscendingOrder(t *testing.T) {
// pointers here. This is for making sure our generic types are in check. To nodeList := []NodeInfo{testNode1, testNode2, testNode3}
// understand this code better read comments on TestUsingDeviationThresholds. expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
func TestUsingDeviationThresholdsWithPointers(t *testing.T) { sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order
userDefinedThresholds := map[string]api.ResourceThresholds{
"low": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
}
nodesTotal := normalizer.Replicate( for i := 0; i < len(expectedNodeList); i++ {
[]string{"node1", "node2", "node3", "node4", "node5"}, if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
map[v1.ResourceName]*resource.Quantity{ t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
v1.ResourceCPU: ptr.To(resource.MustParse("1000")),
v1.ResourceMemory: ptr.To(resource.MustParse("1000")),
},
)
nodesUsage := map[string]map[v1.ResourceName]*resource.Quantity{
"node1": {
v1.ResourceCPU: ptr.To(resource.MustParse("100")),
v1.ResourceMemory: ptr.To(resource.MustParse("100")),
},
"node2": {
v1.ResourceCPU: ptr.To(resource.MustParse("480")),
v1.ResourceMemory: ptr.To(resource.MustParse("480")),
},
"node3": {
v1.ResourceCPU: ptr.To(resource.MustParse("520")),
v1.ResourceMemory: ptr.To(resource.MustParse("520")),
},
"node4": {
v1.ResourceCPU: ptr.To(resource.MustParse("500")),
v1.ResourceMemory: ptr.To(resource.MustParse("500")),
},
"node5": {
v1.ResourceCPU: ptr.To(resource.MustParse("900")),
v1.ResourceMemory: ptr.To(resource.MustParse("900")),
},
}
ptrNormalizer := func(
usages, totals map[v1.ResourceName]*resource.Quantity,
) api.ResourceThresholds {
newUsages := v1.ResourceList{}
for name, usage := range usages {
newUsages[name] = *usage
} }
newTotals := v1.ResourceList{}
for name, total := range totals {
newTotals[name] = *total
}
return ResourceListUsageNormalizer(newUsages, newTotals)
}
usage := normalizer.Normalize(nodesUsage, nodesTotal, ptrNormalizer)
average := normalizer.Average(usage)
thresholds := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
[]api.ResourceThresholds{
normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
normalizer.Sum(average, userDefinedThresholds["high"]),
},
)
result := classifier.Classify(
usage, thresholds,
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
)
expected := []map[string]api.ResourceThresholds{
{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
}
if !reflect.DeepEqual(result, expected) {
t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
}
}
func TestNormalizeAndClassify(t *testing.T) {
for _, tt := range []struct {
name string
usage map[string]v1.ResourceList
totals map[string]v1.ResourceList
thresholds map[string][]api.ResourceThresholds
expected []map[string]api.ResourceThresholds
classifiers []classifier.Classifier[string, api.ResourceThresholds]
}{
{
name: "happy path test",
usage: map[string]v1.ResourceList{
"node1": {
// underutilized on cpu and memory.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10"),
},
"node2": {
// overutilized on cpu and memory.
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
"node3": {
// properly utilized on cpu and memory.
v1.ResourceCPU: resource.MustParse("50"),
v1.ResourceMemory: resource.MustParse("50"),
},
"node4": {
// underutilized on cpu and overutilized on memory.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("90"),
},
},
totals: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
},
),
thresholds: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4"},
[]api.ResourceThresholds{
{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
},
),
expected: []map[string]api.ResourceThresholds{
{
"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
},
{
"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
},
},
classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
},
},
{
name: "three thresholds",
usage: map[string]v1.ResourceList{
"node1": {
// match for the first classifier.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10"),
},
"node2": {
// match for the third classifier.
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
"node3": {
// match for the second classifier.
v1.ResourceCPU: resource.MustParse("40"),
v1.ResourceMemory: resource.MustParse("40"),
},
"node4": {
// matches no classifier.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("90"),
},
"node5": {
// match for the first classifier.
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("18"),
},
},
totals: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
},
),
thresholds: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
[]api.ResourceThresholds{
{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
{v1.ResourceCPU: 50, v1.ResourceMemory: 50},
{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
},
),
expected: []map[string]api.ResourceThresholds{
{
"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
"node5": {v1.ResourceCPU: 11, v1.ResourceMemory: 18},
},
{
"node3": {v1.ResourceCPU: 40, v1.ResourceMemory: 40},
},
{
"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
},
},
classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
},
},
} {
t.Run(tt.name, func(t *testing.T) {
pct := normalizer.Normalize(tt.usage, tt.totals, ResourceListUsageNormalizer)
res := classifier.Classify(pct, tt.thresholds, tt.classifiers...)
if !reflect.DeepEqual(res, tt.expected) {
t.Fatalf("unexpected result: %v, expecting: %v", res, tt.expected)
}
})
} }
} }

View File

@@ -1,142 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package normalizer
import (
"math"
"golang.org/x/exp/constraints"
)
// Normalizer is a function that receives two values of the same type and
// returns an object of a different type. A usage case can be a function
// that converts a memory usage from mb to % (the first argument would be
// the memory usage in mb and the second argument would be the total memory
// available in mb).
type Normalizer[V, N any] func(V, V) N
// Values is a map of values indexed by a comparable key. An example of this
// can be a list of resources indexed by a node name.
type Values[K comparable, V any] map[K]V
// Number is an interface that represents a number. Represents things we
// can do math operations on.
type Number interface {
constraints.Integer | constraints.Float
}
// Normalize uses a Normalizer function to normalize a set of values. For
// example one may want to convert a set of memory usages from mb to %.
// This function receives a set of usages, a set of totals, and a Normalizer
// function. The function will return a map with the normalized values.
func Normalize[K comparable, V, N any](usages, totals Values[K, V], fn Normalizer[V, N]) map[K]N {
result := Values[K, N]{}
for key, value := range usages {
total, ok := totals[key]
if !ok {
continue
}
result[key] = fn(value, total)
}
return result
}
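// Illustrative usage sketch (names and values assumed): normalizing raw
// usage numbers into percentages of capacity.
//
//	usages := map[string]int64{"node1": 250, "node2": 800}
//	totals := map[string]int64{"node1": 1000, "node2": 1000}
//	pct := Normalize(usages, totals, func(used, total int64) float64 {
//		return float64(used) / float64(total) * 100
//	})
//	// pct == map[string]float64{"node1": 25, "node2": 80}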
// Replicate replicates the provided value for each key in the provided slice.
// Returns a map with the keys and the provided value.
func Replicate[K comparable, V any](keys []K, value V) map[K]V {
result := map[K]V{}
for _, key := range keys {
result[key] = value
}
return result
}
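// Illustrative usage sketch (values assumed): give every node the same
// threshold entry.
//
//	thresholds := Replicate([]string{"node1", "node2"}, 50.0)
//	// thresholds == map[string]float64{"node1": 50, "node2": 50}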
// Clamp imposes minimum and maximum limits on a set of values. The function
// will return a set of values where each value is between the minimum and
// maximum values (included). Values below minimum are rounded up to the
// minimum value, and values above maximum are rounded down to the maximum
// value.
func Clamp[K comparable, N Number, V ~map[K]N](values V, minimum, maximum N) V {
result := V{}
for key := range values {
value := values[key]
value = N(math.Max(float64(value), float64(minimum)))
value = N(math.Min(float64(value), float64(maximum)))
result[key] = value
}
return result
}
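// Illustrative usage sketch (values assumed): keep percentages inside the
// 0-100 range.
//
//	clamped := Clamp(map[string]float64{"cpu": 120, "memory": -5}, 0, 100)
//	// clamped == map[string]float64{"cpu": 100, "memory": 0}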
// Map applies a function to each element of a slice of value maps. Returns a new
// slice with the results of applying the function to each element.
func Map[K comparable, N Number, V ~map[K]N](items []V, fn func(V) V) []V {
result := []V{}
for _, item := range items {
result = append(result, fn(item))
}
return result
}
// Negate converts the values of a map to their negated values.
func Negate[K comparable, N Number, V ~map[K]N](values V) V {
result := V{}
for key, value := range values {
result[key] = -value
}
return result
}
// Round rounds the values of a map to the nearest integer. Calls math.Round on
// each value of the map.
func Round[K comparable, N Number, V ~map[K]N](values V) V {
result := V{}
for key, value := range values {
result[key] = N(math.Round(float64(value)))
}
return result
}
// Sum sums up the values of two maps. Values are expected to be of Number
// type. Original values are preserved. Keys present only in the second map
// are ignored; keys present only in the first map are kept, with the
// missing addend treated as zero.
func Sum[K comparable, N Number, V ~map[K]N](mapA, mapB V) V {
result := V{}
for name, value := range mapA {
result[name] = value + mapB[name]
}
return result
}
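// Illustrative usage sketch (values assumed): shift an average by a
// deviation, negating it to obtain the lower limit.
//
//	low := Sum(map[string]float64{"cpu": 50}, Negate(map[string]float64{"cpu": 5}))
//	// low == map[string]float64{"cpu": 45}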
// Average calculates the average of a set of values. This function receives
// a map of values and returns the average of all the values. Average expects
// the values to represent the same unit of measure. You can use this function
// after Normalizing the values.
func Average[J, K comparable, N Number, V ~map[J]N](values map[K]V) V {
counter := map[J]int{}
result := V{}
for _, imap := range values {
for name, value := range imap {
result[name] += value
counter[name]++
}
}
for name := range result {
result[name] /= N(counter[name])
}
return result
}
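// Illustrative usage sketch (values assumed): average normalized usages
// over a set of nodes.
//
//	avg := Average(map[string]map[string]float64{
//		"node1": {"cpu": 40},
//		"node2": {"cpu": 60},
//	})
//	// avg == map[string]float64{"cpu": 50}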


@@ -1,649 +0,0 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package normalizer
import (
"fmt"
"math"
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/descheduler/pkg/api"
)
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
result := api.ResourceThresholds{}
for rname, value := range usages {
total, ok := totals[rname]
if !ok {
continue
}
used, avail := value.Value(), total.Value()
if rname == v1.ResourceCPU {
used, avail = value.MilliValue(), total.MilliValue()
}
pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
result[rname] = api.Percentage(pct)
}
return result
}
func TestNormalizeSimple(t *testing.T) {
for _, tt := range []struct {
name string
usages map[string]float64
totals map[string]float64
expected map[string]float64
normalizer Normalizer[float64, float64]
}{
{
name: "single normalization",
usages: map[string]float64{"cpu": 1},
totals: map[string]float64{"cpu": 2},
expected: map[string]float64{"cpu": 0.5},
normalizer: func(usage, total float64) float64 {
return usage / total
},
},
{
name: "multiple normalizations",
usages: map[string]float64{
"cpu": 1,
"mem": 6,
},
totals: map[string]float64{
"cpu": 2,
"mem": 10,
},
expected: map[string]float64{
"cpu": 0.5,
"mem": 0.6,
},
normalizer: func(usage, total float64) float64 {
return usage / total
},
},
{
name: "missing totals for a key",
usages: map[string]float64{
"cpu": 1,
"mem": 6,
},
totals: map[string]float64{
"cpu": 2,
},
expected: map[string]float64{
"cpu": 0.5,
},
normalizer: func(usage, total float64) float64 {
return usage / total
},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := Normalize(tt.usages, tt.totals, tt.normalizer)
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected result: %v", result)
}
})
}
}
func TestNormalize(t *testing.T) {
for _, tt := range []struct {
name string
usages map[string]v1.ResourceList
totals map[string]v1.ResourceList
expected map[string]api.ResourceThresholds
normalizer Normalizer[v1.ResourceList, api.ResourceThresholds]
}{
{
name: "single normalization",
usages: map[string]v1.ResourceList{
"node1": {v1.ResourceCPU: resource.MustParse("1")},
},
totals: map[string]v1.ResourceList{
"node1": {v1.ResourceCPU: resource.MustParse("2")},
},
expected: map[string]api.ResourceThresholds{
"node1": {v1.ResourceCPU: 50},
},
normalizer: ResourceListUsageNormalizer,
},
{
name: "multiple normalization",
usages: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("6"),
v1.ResourcePods: resource.MustParse("2"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("20"),
v1.ResourcePods: resource.MustParse("30"),
},
},
totals: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("6"),
v1.ResourcePods: resource.MustParse("100"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
v1.ResourcePods: resource.MustParse("100"),
},
},
expected: map[string]api.ResourceThresholds{
"node1": {
v1.ResourceCPU: 50,
v1.ResourceMemory: 100,
v1.ResourcePods: 2,
},
"node2": {
v1.ResourceCPU: 10,
v1.ResourceMemory: 20,
v1.ResourcePods: 30,
},
},
normalizer: ResourceListUsageNormalizer,
},
{
name: "multiple normalization with over 100% usage",
usages: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("120"),
v1.ResourceMemory: resource.MustParse("130"),
v1.ResourcePods: resource.MustParse("140"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("150"),
v1.ResourceMemory: resource.MustParse("160"),
v1.ResourcePods: resource.MustParse("170"),
},
},
totals: Replicate(
[]string{"node1", "node2"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
v1.ResourcePods: resource.MustParse("100"),
},
),
expected: Replicate(
[]string{"node1", "node2"},
api.ResourceThresholds{
v1.ResourceCPU: 100,
v1.ResourceMemory: 100,
v1.ResourcePods: 100,
},
),
normalizer: ResourceListUsageNormalizer,
},
{
name: "multiple normalization with over 100% usage and different totals",
usages: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("99"),
v1.ResourceMemory: resource.MustParse("99Gi"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("8"),
v1.ResourceMemory: resource.MustParse("8Gi"),
},
},
totals: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100Gi"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
},
expected: map[string]api.ResourceThresholds{
"node1": {
v1.ResourceCPU: 50,
v1.ResourceMemory: 50,
},
"node2": {
v1.ResourceCPU: 99,
v1.ResourceMemory: 99,
},
"node3": {
v1.ResourceCPU: 100,
v1.ResourceMemory: 100,
},
},
normalizer: ResourceListUsageNormalizer,
},
} {
t.Run(tt.name, func(t *testing.T) {
result := Normalize(tt.usages, tt.totals, tt.normalizer)
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected result: %v", result)
}
})
}
}
func TestAverage(t *testing.T) {
for _, tt := range []struct {
name string
usage map[string]v1.ResourceList
limits map[string]v1.ResourceList
expected api.ResourceThresholds
}{
{
name: "empty usage",
usage: map[string]v1.ResourceList{},
limits: map[string]v1.ResourceList{},
expected: api.ResourceThresholds{},
},
{
name: "fifty percent usage",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("6"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("6"),
},
},
limits: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("2"),
v1.ResourceMemory: resource.MustParse("12"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("4"),
v1.ResourceMemory: resource.MustParse("12"),
},
},
expected: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourceMemory: 50,
},
},
{
name: "mixed percent usage",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("80"),
v1.ResourcePods: resource.MustParse("20"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("20"),
v1.ResourceMemory: resource.MustParse("60"),
v1.ResourcePods: resource.MustParse("20"),
},
},
limits: Replicate(
[]string{"node1", "node2"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
v1.ResourcePods: resource.MustParse("10000"),
},
),
expected: api.ResourceThresholds{
v1.ResourceCPU: 15,
v1.ResourceMemory: 70,
v1.ResourcePods: 0.2,
},
},
{
name: "mixed limits",
usage: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("30"),
v1.ResourcePods: resource.MustParse("200"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("72"),
v1.ResourcePods: resource.MustParse("200"),
},
},
limits: map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("100"),
v1.ResourcePods: resource.MustParse("1000"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("1000"),
v1.ResourceMemory: resource.MustParse("180"),
v1.ResourcePods: resource.MustParse("10"),
},
},
expected: api.ResourceThresholds{
v1.ResourceCPU: 50.5,
v1.ResourceMemory: 35,
v1.ResourcePods: 60,
},
},
{
name: "some nodes missing some resources",
usage: map[string]v1.ResourceList{
"node1": {
"limit-exists-in-all": resource.MustParse("10"),
"limit-exists-in-two": resource.MustParse("11"),
"limit-does-not-exist": resource.MustParse("12"),
"usage-exists-in-all": resource.MustParse("13"),
"usage-exists-in-two": resource.MustParse("20"),
},
"node2": {
"limit-exists-in-all": resource.MustParse("10"),
"limit-exists-in-two": resource.MustParse("11"),
"limit-does-not-exist": resource.MustParse("12"),
"usage-exists-in-all": resource.MustParse("13"),
"usage-exists-in-two": resource.MustParse("20"),
},
"node3": {
"limit-exists-in-all": resource.MustParse("10"),
"limit-exists-in-two": resource.MustParse("11"),
"limit-does-not-exist": resource.MustParse("12"),
"usage-exists-in-all": resource.MustParse("13"),
},
"node4": {
"limit-exists-in-all": resource.MustParse("10"),
"limit-exists-in-two": resource.MustParse("11"),
"limit-does-not-exist": resource.MustParse("12"),
"usage-exists-in-all": resource.MustParse("13"),
},
"node5": {
"random-usage-without-limit": resource.MustParse("10"),
},
},
limits: map[string]v1.ResourceList{
"node1": {
"limit-exists-in-all": resource.MustParse("100"),
"limit-exists-in-two": resource.MustParse("100"),
"usage-exists-in-all": resource.MustParse("100"),
"usage-exists-in-two": resource.MustParse("100"),
"usage-does-not-exist": resource.MustParse("100"),
},
"node2": {
"limit-exists-in-all": resource.MustParse("100"),
"limit-exists-in-two": resource.MustParse("100"),
"usage-exists-in-all": resource.MustParse("100"),
"usage-exists-in-two": resource.MustParse("100"),
"usage-does-not-exist": resource.MustParse("100"),
},
"node3": {
"limit-exists-in-all": resource.MustParse("100"),
"usage-exists-in-all": resource.MustParse("100"),
"usage-exists-in-two": resource.MustParse("100"),
"usage-does-not-exist": resource.MustParse("100"),
},
"node4": {
"limit-exists-in-all": resource.MustParse("100"),
"usage-exists-in-all": resource.MustParse("100"),
"usage-exists-in-two": resource.MustParse("100"),
"usage-does-not-exist": resource.MustParse("100"),
},
"node5": {
"random-limit-without-usage": resource.MustParse("100"),
},
},
expected: api.ResourceThresholds{
"limit-exists-in-all": 10,
"limit-exists-in-two": 11,
"usage-exists-in-all": 13,
"usage-exists-in-two": 20,
},
},
} {
t.Run(tt.name, func(t *testing.T) {
average := Average(
Normalize(
tt.usage, tt.limits, ResourceListUsageNormalizer,
),
)
if !reflect.DeepEqual(average, tt.expected) {
t.Fatalf("unexpected result: %v, expected: %v", average, tt.expected)
}
})
}
}
func TestSum(t *testing.T) {
for _, tt := range []struct {
name string
data api.ResourceThresholds
deviations []api.ResourceThresholds
expected []api.ResourceThresholds
}{
{
name: "single deviation",
data: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourceMemory: 50,
v1.ResourcePods: 50,
},
deviations: []api.ResourceThresholds{
{
v1.ResourceCPU: 1,
v1.ResourceMemory: 1,
v1.ResourcePods: 1,
},
{
v1.ResourceCPU: 2,
v1.ResourceMemory: 2,
v1.ResourcePods: 2,
},
{
v1.ResourceCPU: 3,
v1.ResourceMemory: 3,
v1.ResourcePods: 3,
},
},
expected: []api.ResourceThresholds{
{
v1.ResourceCPU: 51,
v1.ResourceMemory: 51,
v1.ResourcePods: 51,
},
{
v1.ResourceCPU: 52,
v1.ResourceMemory: 52,
v1.ResourcePods: 52,
},
{
v1.ResourceCPU: 53,
v1.ResourceMemory: 53,
v1.ResourcePods: 53,
},
},
},
{
name: "deviate with negative values",
data: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourceMemory: 50,
v1.ResourcePods: 50,
},
deviations: []api.ResourceThresholds{
{
v1.ResourceCPU: -2,
v1.ResourceMemory: -2,
v1.ResourcePods: -2,
},
{
v1.ResourceCPU: -1,
v1.ResourceMemory: -1,
v1.ResourcePods: -1,
},
{
v1.ResourceCPU: 0,
v1.ResourceMemory: 0,
v1.ResourcePods: 0,
},
{
v1.ResourceCPU: 1,
v1.ResourceMemory: 1,
v1.ResourcePods: 1,
},
{
v1.ResourceCPU: 2,
v1.ResourceMemory: 2,
v1.ResourcePods: 2,
},
},
expected: []api.ResourceThresholds{
{
v1.ResourceCPU: 48,
v1.ResourceMemory: 48,
v1.ResourcePods: 48,
},
{
v1.ResourceCPU: 49,
v1.ResourceMemory: 49,
v1.ResourcePods: 49,
},
{
v1.ResourceCPU: 50,
v1.ResourceMemory: 50,
v1.ResourcePods: 50,
},
{
v1.ResourceCPU: 51,
v1.ResourceMemory: 51,
v1.ResourcePods: 51,
},
{
v1.ResourceCPU: 52,
v1.ResourceMemory: 52,
v1.ResourcePods: 52,
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := []api.ResourceThresholds{}
for _, deviation := range tt.deviations {
partial := Sum(tt.data, deviation)
result = append(result, partial)
}
if len(result) != len(tt.deviations) {
t.Fatalf("unexpected result: %v", result)
}
if !reflect.DeepEqual(result, tt.expected) {
fmt.Printf("%T, %T\n", result, tt.expected)
t.Fatalf("unexpected result: %v", result)
}
})
}
}
func TestClamp(t *testing.T) {
for _, tt := range []struct {
name string
data []api.ResourceThresholds
minimum api.Percentage
maximum api.Percentage
expected []api.ResourceThresholds
}{
{
name: "all over the limit",
data: []api.ResourceThresholds{
{
v1.ResourceCPU: 50,
v1.ResourceMemory: 50,
v1.ResourcePods: 50,
},
},
minimum: 10,
maximum: 20,
expected: []api.ResourceThresholds{
{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
v1.ResourcePods: 20,
},
},
},
{
name: "some over some below the limits",
data: []api.ResourceThresholds{
{
v1.ResourceCPU: 7,
v1.ResourceMemory: 8,
v1.ResourcePods: 88,
},
},
minimum: 10,
maximum: 20,
expected: []api.ResourceThresholds{
{
v1.ResourceCPU: 10,
v1.ResourceMemory: 10,
v1.ResourcePods: 20,
},
},
},
{
name: "all within the limits",
data: []api.ResourceThresholds{
{
v1.ResourceCPU: 15,
v1.ResourceMemory: 15,
v1.ResourcePods: 15,
},
},
minimum: 10,
maximum: 20,
expected: []api.ResourceThresholds{
{
v1.ResourceCPU: 15,
v1.ResourceMemory: 15,
v1.ResourcePods: 15,
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
fn := func(thresholds api.ResourceThresholds) api.ResourceThresholds {
return Clamp(thresholds, tt.minimum, tt.maximum)
}
result := Map(tt.data, fn)
if !reflect.DeepEqual(result, tt.expected) {
t.Fatalf("unexpected result: %v", result)
}
})
}
}

View File

@@ -18,18 +18,6 @@ import (
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
) )
// EvictionMode describe a mode of eviction. See the list below for the
// available modes.
type EvictionMode string
const (
// EvictionModeOnlyThresholdingResources makes the descheduler evict
// only pods that have a resource request defined for any of the user
// provided thresholds. If the pod does not request the resource, it
// will not be evicted.
EvictionModeOnlyThresholdingResources EvictionMode = "OnlyThresholdingResources"
)
// +k8s:deepcopy-gen=true // +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -40,15 +28,12 @@ type LowNodeUtilizationArgs struct {
Thresholds api.ResourceThresholds `json:"thresholds"` Thresholds api.ResourceThresholds `json:"thresholds"`
TargetThresholds api.ResourceThresholds `json:"targetThresholds"` TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
NumberOfNodes int `json:"numberOfNodes,omitempty"` NumberOfNodes int `json:"numberOfNodes,omitempty"`
MetricsUtilization *MetricsUtilization `json:"metricsUtilization,omitempty"` MetricsUtilization MetricsUtilization `json:metricsUtilization,omitempty`
// Naming this one differently since namespaces are still // Naming this one differently since namespaces are still
// considered while considering resources used by pods // considered while considering resources used by pods
// but then filtered out before eviction // but then filtered out before eviction
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"` EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
// evictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
EvictionLimits *api.EvictionLimits `json:"evictionLimits,omitempty"`
} }
// +k8s:deepcopy-gen=true // +k8s:deepcopy-gen=true
@@ -57,15 +42,9 @@ type LowNodeUtilizationArgs struct {
type HighNodeUtilizationArgs struct { type HighNodeUtilizationArgs struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
Thresholds api.ResourceThresholds `json:"thresholds"` Thresholds api.ResourceThresholds `json:"thresholds"`
NumberOfNodes int `json:"numberOfNodes,omitempty"` NumberOfNodes int `json:"numberOfNodes,omitempty"`
MetricsUtilization MetricsUtilization `json:metricsUtilization,omitempty`
// EvictionModes is a set of modes to be taken into account when the
// descheduler evicts pods. For example the mode
// `OnlyThresholdingResources` can be used to make sure the descheduler
	// only evicts pods that have resource requests for the defined
// thresholds.
EvictionModes []EvictionMode `json:"evictionModes,omitempty"`
// Naming this one differently since namespaces are still // Naming this one differently since namespaces are still
// considered while considering resources used by pods // considered while considering resources used by pods
@@ -74,24 +53,8 @@ type HighNodeUtilizationArgs struct {
} }
// MetricsUtilization allow to consume actual resource utilization from metrics // MetricsUtilization allow to consume actual resource utilization from metrics
// +k8s:deepcopy-gen=true
type MetricsUtilization struct { type MetricsUtilization struct {
// metricsServer enables metrics from a kubernetes metrics server. // metricsServer enables metrics from a kubernetes metrics server.
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more. // Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
// Deprecated. Use Source instead.
MetricsServer bool `json:"metricsServer,omitempty"` MetricsServer bool `json:"metricsServer,omitempty"`
// source enables the plugin to consume metrics from a metrics source.
	// Currently only KubernetesMetrics is available.
Source api.MetricsSource `json:"source,omitempty"`
// prometheus enables metrics collection through a prometheus query.
Prometheus *Prometheus `json:"prometheus,omitempty"`
}
type Prometheus struct {
// query returning a vector of samples, each sample labeled with `instance`
// corresponding to a node name with each sample value as a real number
// in <0; 1> interval.
Query string `json:"query,omitempty"`
} }
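
For orientation, here is a minimal sketch of how the newer (left-hand) argument shape could be filled in from Go code in the same package. The examplePrometheusArgs helper name is hypothetical, the query string is simply the one used by the tests further below, and the v1/api imports are assumed to match this file's.

// examplePrometheusArgs builds a LowNodeUtilizationArgs that selects the
// Prometheus metrics source, which the validation code accepts because a
// non-empty query is provided. Illustrative only.
func examplePrometheusArgs() *LowNodeUtilizationArgs {
	return &LowNodeUtilizationArgs{
		Thresholds: api.ResourceThresholds{
			v1.ResourceCPU:    20,
			v1.ResourceMemory: 20,
		},
		TargetThresholds: api.ResourceThresholds{
			v1.ResourceCPU:    80,
			v1.ResourceMemory: 80,
		},
		MetricsUtilization: &MetricsUtilization{
			Source: api.PrometheusMetrics,
			Prometheus: &Prometheus{
				Query: "instance:node_cpu:rate:sum",
			},
		},
	}
}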

View File

@@ -19,67 +19,38 @@ package nodeutilization
import ( import (
"context" "context"
"fmt" "fmt"
"time"
promapi "github.com/prometheus/client_golang/api"
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2" "k8s.io/klog/v2"
utilptr "k8s.io/utils/ptr" utilptr "k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector" "sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node" nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils" "sigs.k8s.io/descheduler/pkg/utils"
) )
type UsageClientType int
const (
requestedUsageClientType UsageClientType = iota
actualUsageClientType
prometheusUsageClientType
)
type notSupportedError struct {
usageClientType UsageClientType
}
func (e notSupportedError) Error() string {
return "maximum number of evicted pods per node reached"
}
func newNotSupportedError(usageClientType UsageClientType) *notSupportedError {
return &notSupportedError{
usageClientType: usageClientType,
}
}
type usageClient interface { type usageClient interface {
// Both low/high node utilization plugins are expected to invoke sync right nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
// after Balance method is invoked. There's no cache invalidation so each nodes() []*v1.Node
// Balance is expected to get the latest data by invoking sync.
sync(ctx context.Context, nodes []*v1.Node) error
nodeUtilization(node string) api.ReferencedResourceList
pods(node string) []*v1.Pod pods(node string) []*v1.Pod
podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) capture(nodes []*v1.Node) error
podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
} }
type requestedUsageClient struct { type requestedUsageClient struct {
resourceNames []v1.ResourceName resourceNames []v1.ResourceName
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
_nodes []*v1.Node
_pods map[string][]*v1.Pod _pods map[string][]*v1.Pod
_nodeUtilization map[string]api.ReferencedResourceList _nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
} }
var _ usageClient = &requestedUsageClient{} var _ usageClient = &requestedUsageClient{}
func newRequestedUsageClient( func newRequestedUsageSnapshot(
resourceNames []v1.ResourceName, resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) *requestedUsageClient { ) *requestedUsageClient {
@@ -89,31 +60,36 @@ func newRequestedUsageClient(
} }
} }
func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList { func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
return s._nodeUtilization[node] return s._nodeUtilization[node]
} }
func (s *requestedUsageClient) nodes() []*v1.Node {
return s._nodes
}
func (s *requestedUsageClient) pods(node string) []*v1.Pod { func (s *requestedUsageClient) pods(node string) []*v1.Pod {
return s._pods[node] return s._pods[node]
} }
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) { func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
usage := make(api.ReferencedResourceList) usage := make(map[v1.ResourceName]*resource.Quantity)
for _, resourceName := range s.resourceNames { for _, resourceName := range s.resourceNames {
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy()) usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
} }
return usage, nil return usage, nil
} }
func (s *requestedUsageClient) sync(ctx context.Context, nodes []*v1.Node) error { func (s *requestedUsageClient) capture(nodes []*v1.Node) error {
s._nodeUtilization = make(map[string]api.ReferencedResourceList) s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
s._pods = make(map[string][]*v1.Pod) s._pods = make(map[string][]*v1.Pod)
capturedNodes := []*v1.Node{}
for _, node := range nodes { for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(node.Name, s.getPodsAssignedToNode, nil) pods, err := podutil.ListPodsOnANode(node.Name, s.getPodsAssignedToNode, nil)
if err != nil { if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err) klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err) continue
} }
nodeUsage, err := nodeutil.NodeUtilization(pods, s.resourceNames, func(pod *v1.Pod) (v1.ResourceList, error) { nodeUsage, err := nodeutil.NodeUtilization(pods, s.resourceNames, func(pod *v1.Pod) (v1.ResourceList, error) {
@@ -127,8 +103,11 @@ func (s *requestedUsageClient) sync(ctx context.Context, nodes []*v1.Node) error
// store the snapshot of pods from the same (or the closest) node utilization computation // store the snapshot of pods from the same (or the closest) node utilization computation
s._pods[node.Name] = pods s._pods[node.Name] = pods
s._nodeUtilization[node.Name] = nodeUsage s._nodeUtilization[node.Name] = nodeUsage
capturedNodes = append(capturedNodes, node)
} }
s._nodes = capturedNodes
return nil return nil
} }
@@ -137,13 +116,14 @@ type actualUsageClient struct {
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
metricsCollector *metricscollector.MetricsCollector metricsCollector *metricscollector.MetricsCollector
_nodes []*v1.Node
_pods map[string][]*v1.Pod _pods map[string][]*v1.Pod
_nodeUtilization map[string]api.ReferencedResourceList _nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
} }
var _ usageClient = &actualUsageClient{} var _ usageClient = &actualUsageClient{}
func newActualUsageClient( func newActualUsageSnapshot(
resourceNames []v1.ResourceName, resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
metricsCollector *metricscollector.MetricsCollector, metricsCollector *metricscollector.MetricsCollector,
@@ -155,15 +135,19 @@ func newActualUsageClient(
} }
} }
func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList { func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
return client._nodeUtilization[node] return client._nodeUtilization[node]
} }
func (client *actualUsageClient) nodes() []*v1.Node {
return client._nodes
}
func (client *actualUsageClient) pods(node string) []*v1.Pod { func (client *actualUsageClient) pods(node string) []*v1.Pod {
return client._pods[node] return client._pods[node]
} }
func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) { func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
	// It's not efficient to keep track of all pods in a cluster when only a fraction of them is evicted.	// It's not efficient to keep track of all pods in a cluster when only a fraction of them is evicted.
// Thus, take the current pod metrics without computing any softening (like e.g. EWMA). // Thus, take the current pod metrics without computing any softening (like e.g. EWMA).
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -171,14 +155,11 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceLi
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err) return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
} }
totalUsage := make(api.ReferencedResourceList) totalUsage := make(map[v1.ResourceName]*resource.Quantity)
for _, container := range podMetrics.Containers { for _, container := range podMetrics.Containers {
for _, resourceName := range client.resourceNames { for _, resourceName := range client.resourceNames {
if resourceName == v1.ResourcePods {
continue
}
if _, exists := container.Usage[resourceName]; !exists { if _, exists := container.Usage[resourceName]; !exists {
return nil, fmt.Errorf("pod %v/%v: container %q is missing %q resource", pod.Namespace, pod.Name, container.Name, resourceName) continue
} }
if totalUsage[resourceName] == nil { if totalUsage[resourceName] == nil {
totalUsage[resourceName] = utilptr.To[resource.Quantity](container.Usage[resourceName].DeepCopy()) totalUsage[resourceName] = utilptr.To[resource.Quantity](container.Usage[resourceName].DeepCopy())
@@ -191,131 +172,31 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceLi
return totalUsage, nil return totalUsage, nil
} }
func (client *actualUsageClient) sync(ctx context.Context, nodes []*v1.Node) error { func (client *actualUsageClient) capture(nodes []*v1.Node) error {
client._nodeUtilization = make(map[string]api.ReferencedResourceList) client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
client._pods = make(map[string][]*v1.Pod) client._pods = make(map[string][]*v1.Pod)
capturedNodes := []*v1.Node{}
nodesUsage, err := client.metricsCollector.AllNodesUsage()
if err != nil {
return err
}
for _, node := range nodes { for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil) pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
if err != nil { if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err) klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err) continue
} }
collectedNodeUsage, ok := nodesUsage[node.Name] nodeUsage, err := client.metricsCollector.NodeUsage(node)
if !ok { if err != nil {
return fmt.Errorf("unable to find node %q in the collected metrics", node.Name) return err
} }
collectedNodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI) nodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
nodeUsage := api.ReferencedResourceList{}
for _, resourceName := range client.resourceNames {
if _, exists := collectedNodeUsage[resourceName]; !exists {
return fmt.Errorf("unable to find %q resource for collected %q node metric", resourceName, node.Name)
}
nodeUsage[resourceName] = collectedNodeUsage[resourceName]
}
// store the snapshot of pods from the same (or the closest) node utilization computation // store the snapshot of pods from the same (or the closest) node utilization computation
client._pods[node.Name] = pods client._pods[node.Name] = pods
client._nodeUtilization[node.Name] = nodeUsage client._nodeUtilization[node.Name] = nodeUsage
capturedNodes = append(capturedNodes, node)
} }
return nil client._nodes = capturedNodes
}
type prometheusUsageClient struct {
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
promClient promapi.Client
promQuery string
_pods map[string][]*v1.Pod
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
}
var _ usageClient = &prometheusUsageClient{}
func newPrometheusUsageClient(
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
promClient promapi.Client,
promQuery string,
) *prometheusUsageClient {
return &prometheusUsageClient{
getPodsAssignedToNode: getPodsAssignedToNode,
promClient: promClient,
promQuery: promQuery,
}
}
func (client *prometheusUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
return client._nodeUtilization[node]
}
func (client *prometheusUsageClient) pods(node string) []*v1.Pod {
return client._pods[node]
}
func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
return nil, newNotSupportedError(prometheusUsageClientType)
}
func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
if err != nil {
return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
}
if len(warnings) > 0 {
klog.Infof("prometheus metrics warnings: %v", warnings)
}
if results.Type() != model.ValVector {
return nil, fmt.Errorf("expected query results to be of type %q, got %q instead", model.ValVector, results.Type())
}
nodeUsages := make(map[string]map[v1.ResourceName]*resource.Quantity)
for _, sample := range results.(model.Vector) {
nodeName, exists := sample.Metric["instance"]
if !exists {
return nil, fmt.Errorf("The collected metrics sample is missing 'instance' key")
}
if sample.Value < 0 || sample.Value > 1 {
return nil, fmt.Errorf("The collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
}
nodeUsages[string(nodeName)] = map[v1.ResourceName]*resource.Quantity{
MetricResource: resource.NewQuantity(int64(sample.Value*100), resource.DecimalSI),
}
}
return nodeUsages, nil
}
func (client *prometheusUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
client._pods = make(map[string][]*v1.Pod)
nodeUsages, err := NodeUsageFromPrometheusMetrics(ctx, client.promClient, client.promQuery)
if err != nil {
return err
}
for _, node := range nodes {
if _, exists := nodeUsages[node.Name]; !exists {
return fmt.Errorf("unable to find metric entry for %v", node.Name)
}
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
}
// store the snapshot of pods from the same (or the closest) node utilization computation
client._pods[node.Name] = pods
client._nodeUtilization[node.Name] = nodeUsages[node.Name]
}
return nil return nil
} }
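
A minimal sketch of how a caller (for example a Balance implementation) might drive the newer usageClient interface from the left-hand column above. The balanceSketch name and its structure are hypothetical; only the interface methods shown above are used.

func balanceSketch(ctx context.Context, client usageClient, nodes []*v1.Node) error {
	// sync must run at the start of every Balance invocation; there is no
	// cache invalidation, so this is what refreshes the snapshot.
	if err := client.sync(ctx, nodes); err != nil {
		return err
	}
	for _, node := range nodes {
		// Per-node utilization as captured by the last sync.
		_ = client.nodeUtilization(node.Name)
		// Pods captured in the same snapshot. Per-pod usage is not supported
		// by every client: the prometheus-backed one returns an error here.
		for _, pod := range client.pods(node.Name) {
			if _, err := client.podUsage(pod); err != nil {
				return err
			}
		}
	}
	return nil
}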

View File

@@ -18,32 +18,23 @@ package nodeutilization
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"net/http"
"net/url"
"testing" "testing"
"github.com/prometheus/common/model"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
fakeclientset "k8s.io/client-go/kubernetes/fake" fakeclientset "k8s.io/client-go/kubernetes/fake"
"k8s.io/metrics/pkg/apis/metrics/v1beta1" "k8s.io/metrics/pkg/apis/metrics/v1beta1"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake" fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector" "sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod" podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test" "sigs.k8s.io/descheduler/test"
) )
var ( var gvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodemetricses"}
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
)
func updateMetricsAndCheckNodeUtilization( func updateMetricsAndCheckNodeUtilization(
t *testing.T, t *testing.T,
@@ -51,32 +42,36 @@ func updateMetricsAndCheckNodeUtilization(
newValue, expectedValue int64, newValue, expectedValue int64,
metricsClientset *fakemetricsclient.Clientset, metricsClientset *fakemetricsclient.Clientset,
collector *metricscollector.MetricsCollector, collector *metricscollector.MetricsCollector,
usageClient usageClient, usageSnapshot usageClient,
nodes []*v1.Node, nodes []*v1.Node,
nodeName string, nodeName string,
nodemetrics *v1beta1.NodeMetrics, nodemetrics *v1beta1.NodeMetrics,
) { ) {
t.Logf("Set current node cpu usage to %v", newValue) t.Logf("Set current node cpu usage to %v", newValue)
nodemetrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(newValue, resource.DecimalSI) nodemetrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(newValue, resource.DecimalSI)
metricsClientset.Tracker().Update(nodesgvr, nodemetrics, "") metricsClientset.Tracker().Update(gvr, nodemetrics, "")
err := collector.Collect(ctx) err := collector.Collect(ctx)
if err != nil { if err != nil {
t.Fatalf("failed to capture metrics: %v", err) t.Fatalf("failed to capture metrics: %v", err)
} }
err = usageClient.sync(ctx, nodes) err = usageSnapshot.capture(nodes)
if err != nil { if err != nil {
t.Fatalf("failed to sync a snapshot: %v", err) t.Fatalf("failed to capture a snapshot: %v", err)
} }
nodeUtilization := usageClient.nodeUtilization(nodeName) nodeUtilization := usageSnapshot.nodeUtilization(nodeName)
t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue()) t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
if nodeUtilization[v1.ResourceCPU].MilliValue() != expectedValue { if nodeUtilization[v1.ResourceCPU].MilliValue() != expectedValue {
t.Fatalf("cpu node usage expected to be %v, got %v instead", expectedValue, nodeUtilization[v1.ResourceCPU].MilliValue()) t.Fatalf("cpu node usage expected to be %v, got %v instead", expectedValue, nodeUtilization[v1.ResourceCPU].MilliValue())
} }
pods := usageClient.pods(nodeName) pods := usageSnapshot.pods(nodeName)
fmt.Printf("pods: %#v\n", pods) fmt.Printf("pods: %#v\n", pods)
if len(pods) != 2 { if len(pods) != 2 {
t.Fatalf("expected 2 pods for node %v, got %v instead", nodeName, len(pods)) t.Fatalf("expected 2 pods for node %v, got %v instead", nodeName, len(pods))
} }
capturedNodes := usageSnapshot.nodes()
if len(capturedNodes) != 3 {
t.Fatalf("expected 3 captured node, got %v instead", len(capturedNodes))
}
} }
func TestActualUsageClient(t *testing.T) { func TestActualUsageClient(t *testing.T) {
@@ -96,10 +91,7 @@ func TestActualUsageClient(t *testing.T) {
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816) n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3) clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
metricsClientset := fakemetricsclient.NewSimpleClientset() metricsClientset := fakemetricsclient.NewSimpleClientset(n1metrics, n2metrics, n3metrics)
metricsClientset.Tracker().Create(nodesgvr, n1metrics, "")
metricsClientset.Tracker().Create(nodesgvr, n2metrics, "")
metricsClientset.Tracker().Create(nodesgvr, n3metrics, "")
ctx := context.TODO() ctx := context.TODO()
@@ -110,7 +102,6 @@ func TestActualUsageClient(t *testing.T) {
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0) sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer() podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer) podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil { if err != nil {
t.Fatalf("Build get pods assigned to node function error: %v", err) t.Fatalf("Build get pods assigned to node function error: %v", err)
@@ -119,9 +110,9 @@ func TestActualUsageClient(t *testing.T) {
sharedInformerFactory.Start(ctx.Done()) sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done()) sharedInformerFactory.WaitForCacheSync(ctx.Done())
collector := metricscollector.NewMetricsCollector(nodeLister, metricsClientset, labels.Everything()) collector := metricscollector.NewMetricsCollector(clientset, metricsClientset)
usageClient := newActualUsageClient( usageSnapshot := newActualUsageSnapshot(
resourceNames, resourceNames,
podsAssignedToNode, podsAssignedToNode,
collector, collector,
@@ -129,171 +120,16 @@ func TestActualUsageClient(t *testing.T) {
updateMetricsAndCheckNodeUtilization(t, ctx, updateMetricsAndCheckNodeUtilization(t, ctx,
1400, 1400, 1400, 1400,
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics, metricsClientset, collector, usageSnapshot, nodes, n2.Name, n2metrics,
) )
updateMetricsAndCheckNodeUtilization(t, ctx, updateMetricsAndCheckNodeUtilization(t, ctx,
500, 1310, 500, 1310,
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics, metricsClientset, collector, usageSnapshot, nodes, n2.Name, n2metrics,
) )
updateMetricsAndCheckNodeUtilization(t, ctx, updateMetricsAndCheckNodeUtilization(t, ctx,
900, 1269, 900, 1269,
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics, metricsClientset, collector, usageSnapshot, nodes, n2.Name, n2metrics,
) )
} }
type fakePromClient struct {
result interface{}
dataType model.ValueType
}
type fakePayload struct {
Status string `json:"status"`
Data queryResult `json:"data"`
}
type queryResult struct {
Type model.ValueType `json:"resultType"`
Result interface{} `json:"result"`
}
func (client *fakePromClient) URL(ep string, args map[string]string) *url.URL {
return &url.URL{}
}
func (client *fakePromClient) Do(ctx context.Context, request *http.Request) (*http.Response, []byte, error) {
jsonData, err := json.Marshal(fakePayload{
Status: "success",
Data: queryResult{
Type: client.dataType,
Result: client.result,
},
})
return &http.Response{StatusCode: 200}, jsonData, err
}
func sample(metricName, nodeName string, value float64) *model.Sample {
return &model.Sample{
Metric: model.Metric{
"__name__": model.LabelValue(metricName),
"instance": model.LabelValue(nodeName),
},
Value: model.SampleValue(value),
Timestamp: 1728991761711,
}
}
func TestPrometheusUsageClient(t *testing.T) {
n1 := test.BuildTestNode("ip-10-0-17-165.ec2.internal", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("ip-10-0-51-101.ec2.internal", 2000, 3000, 10, nil)
n3 := test.BuildTestNode("ip-10-0-94-25.ec2.internal", 2000, 3000, 10, nil)
nodes := []*v1.Node{n1, n2, n3}
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
tests := []struct {
name string
result interface{}
dataType model.ValueType
nodeUsage map[string]int64
err error
}{
{
name: "valid data",
dataType: model.ValVector,
result: model.Vector{
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 0.20381818181818104),
sample("instance:node_cpu:rate:sum", "ip-10-0-17-165.ec2.internal", 0.4245454545454522),
sample("instance:node_cpu:rate:sum", "ip-10-0-94-25.ec2.internal", 0.5695757575757561),
},
nodeUsage: map[string]int64{
"ip-10-0-51-101.ec2.internal": 20,
"ip-10-0-17-165.ec2.internal": 42,
"ip-10-0-94-25.ec2.internal": 56,
},
},
{
name: "invalid data missing instance label",
dataType: model.ValVector,
result: model.Vector{
&model.Sample{
Metric: model.Metric{
"__name__": model.LabelValue("instance:node_cpu:rate:sum"),
},
Value: model.SampleValue(0.20381818181818104),
Timestamp: 1728991761711,
},
},
err: fmt.Errorf("The collected metrics sample is missing 'instance' key"),
},
{
name: "invalid data value out of range",
dataType: model.ValVector,
result: model.Vector{
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 1.20381818181818104),
},
err: fmt.Errorf("The collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
},
{
name: "invalid data not a vector",
dataType: model.ValScalar,
result: model.Scalar{
Value: model.SampleValue(0.20381818181818104),
Timestamp: 1728991761711,
},
err: fmt.Errorf("expected query results to be of type \"vector\", got \"scalar\" instead"),
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
pClient := &fakePromClient{
result: tc.result,
dataType: tc.dataType,
}
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
ctx := context.TODO()
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Fatalf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
prometheusUsageClient := newPrometheusUsageClient(podsAssignedToNode, pClient, "instance:node_cpu:rate:sum")
err = prometheusUsageClient.sync(ctx, nodes)
if tc.err == nil {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
} else {
if err == nil {
t.Fatalf("unexpected %q error, got nil instead", tc.err)
} else if err.Error() != tc.err.Error() {
t.Fatalf("expected %q error, got %q instead", tc.err, err)
}
return
}
for _, node := range nodes {
nodeUtil := prometheusUsageClient.nodeUtilization(node.Name)
if nodeUtil[MetricResource].Value() != tc.nodeUsage[node.Name] {
t.Fatalf("expected %q node utilization to be %v, got %v instead", node.Name, tc.nodeUsage[node.Name], nodeUtil[MetricResource])
} else {
t.Logf("%v node utilization: %v", node.Name, nodeUtil[MetricResource])
}
}
})
}
}

View File

@@ -30,25 +30,7 @@ func ValidateHighNodeUtilizationArgs(obj runtime.Object) error {
if err != nil { if err != nil {
return err return err
} }
// make sure we know about the eviction modes defined by the user.
return validateEvictionModes(args.EvictionModes)
}
// validateEvictionModes checks if the eviction modes are valid/known
// to the descheduler.
func validateEvictionModes(modes []EvictionMode) error {
// we are using this approach to make the code more extensible
// in the future.
validModes := map[EvictionMode]bool{
EvictionModeOnlyThresholdingResources: true,
}
for _, mode := range modes {
if validModes[mode] {
continue
}
return fmt.Errorf("invalid eviction mode %s", mode)
}
return nil return nil
} }
@@ -62,17 +44,6 @@ func ValidateLowNodeUtilizationArgs(obj runtime.Object) error {
if err != nil { if err != nil {
return err return err
} }
if args.MetricsUtilization != nil {
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.MetricsServer {
return fmt.Errorf("it is not allowed to set both %q source and metricsServer", api.KubernetesMetrics)
}
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.Prometheus != nil {
return fmt.Errorf("prometheus configuration is not allowed to set when source is set to %q", api.KubernetesMetrics)
}
if args.MetricsUtilization.Source == api.PrometheusMetrics && (args.MetricsUtilization.Prometheus == nil || args.MetricsUtilization.Prometheus.Query == "") {
return fmt.Errorf("prometheus query is required when metrics source is set to %q", api.PrometheusMetrics)
}
}
return nil return nil
} }
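
To make the new checks concrete, a hedged sketch of a configuration that the validation above rejects; the exampleRejectedArgs helper name is hypothetical, the v1/api imports are assumed, and the expected error text is taken verbatim from the code above.

// exampleRejectedArgs combines the deprecated metricsServer flag with the
// KubernetesMetrics source, which trips the first of the new checks above.
// Illustrative only.
func exampleRejectedArgs() error {
	args := &LowNodeUtilizationArgs{
		Thresholds:       api.ResourceThresholds{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
		TargetThresholds: api.ResourceThresholds{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
		MetricsUtilization: &MetricsUtilization{
			MetricsServer: true,
			Source:        api.KubernetesMetrics,
		},
	}
	// Expected: it is not allowed to set both "KubernetesMetrics" source and metricsServer
	return ValidateLowNodeUtilizationArgs(args)
}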

View File

@@ -21,239 +21,164 @@ import (
"testing" "testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/descheduler/pkg/api" "sigs.k8s.io/descheduler/pkg/api"
) )
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) { func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
extendedResource := v1.ResourceName("example.com/foo") extendedResource := v1.ResourceName("example.com/foo")
tests := []struct { tests := []struct {
name string name string
args *LowNodeUtilizationArgs thresholds api.ResourceThresholds
errInfo error targetThresholds api.ResourceThresholds
errInfo error
}{ }{
{ {
name: "passing invalid thresholds", name: "passing invalid thresholds",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 120,
v1.ResourceMemory: 120, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80,
},
}, },
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf( errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)), "%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
}, },
{ {
name: "thresholds and targetThresholds configured different num of resources", name: "thresholds and targetThresholds configured different num of resources",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80, v1.ResourcePods: 80,
v1.ResourcePods: 80,
},
}, },
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"), errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
}, },
{ {
name: "thresholds and targetThresholds configured different resources", name: "thresholds and targetThresholds configured different resources",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourcePods: 80,
v1.ResourcePods: 80,
},
}, },
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"), errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
}, },
{ {
name: "thresholds' CPU config value is greater than targetThresholds'", name: "thresholds' CPU config value is greater than targetThresholds'",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 90,
v1.ResourceCPU: 90, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80,
},
}, },
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU), errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
}, },
{ {
name: "only thresholds configured extended resource", name: "only thresholds configured extended resource",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, extendedResource: 20,
extendedResource: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80,
},
}, },
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"), errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
}, },
{ {
name: "only targetThresholds configured extended resource", name: "only targetThresholds configured extended resource",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80, extendedResource: 80,
extendedResource: 80,
},
}, },
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"), errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
}, },
{ {
name: "thresholds and targetThresholds configured different extended resources", name: "thresholds and targetThresholds configured different extended resources",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, extendedResource: 20,
extendedResource: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80, "example.com/bar": 80,
"example.com/bar": 80,
},
}, },
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"), errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
}, },
{ {
name: "thresholds' extended resource config value is greater than targetThresholds'", name: "thresholds' extended resource config value is greater than targetThresholds'",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, extendedResource: 90,
extendedResource: 90, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80, extendedResource: 20,
extendedResource: 20,
},
}, },
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource), errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
}, },
{ {
name: "passing valid plugin config", name: "passing valid plugin config",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80,
},
}, },
errInfo: nil, errInfo: nil,
}, },
{ {
name: "passing valid plugin config with extended resource", name: "passing valid plugin config with extended resource",
args: &LowNodeUtilizationArgs{ thresholds: api.ResourceThresholds{
Thresholds: api.ResourceThresholds{ v1.ResourceCPU: 20,
v1.ResourceCPU: 20, v1.ResourceMemory: 20,
v1.ResourceMemory: 20, extendedResource: 20,
extendedResource: 20, },
}, targetThresholds: api.ResourceThresholds{
TargetThresholds: api.ResourceThresholds{ v1.ResourceCPU: 80,
v1.ResourceCPU: 80, v1.ResourceMemory: 80,
v1.ResourceMemory: 80, extendedResource: 80,
extendedResource: 80,
},
}, },
errInfo: nil, errInfo: nil,
}, },
{
name: "setting both kubernetes metrics source and metricsserver",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
TargetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
MetricsUtilization: &MetricsUtilization{
MetricsServer: true,
Source: api.KubernetesMetrics,
},
},
errInfo: fmt.Errorf("it is not allowed to set both \"KubernetesMetrics\" source and metricsServer"),
},
{
name: "missing prometheus query",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
TargetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
},
},
errInfo: fmt.Errorf("prometheus query is required when metrics source is set to \"Prometheus\""),
},
{
name: "prometheus set when source set to kubernetes metrics",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
v1.ResourceCPU: 20,
v1.ResourceMemory: 20,
extendedResource: 20,
},
TargetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 80,
v1.ResourceMemory: 80,
extendedResource: 80,
},
MetricsUtilization: &MetricsUtilization{
Source: api.KubernetesMetrics,
Prometheus: &Prometheus{},
},
},
errInfo: fmt.Errorf("prometheus configuration is not allowed to set when source is set to \"KubernetesMetrics\""),
},
} }
for _, testCase := range tests { for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) { args := &LowNodeUtilizationArgs{
validateErr := ValidateLowNodeUtilizationArgs(runtime.Object(testCase.args)) Thresholds: testCase.thresholds,
if validateErr == nil || testCase.errInfo == nil { TargetThresholds: testCase.targetThresholds,
if validateErr != testCase.errInfo { }
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr) validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
}
} else if validateErr.Error() != testCase.errInfo.Error() { if validateErr == nil || testCase.errInfo == nil {
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr) if validateErr != testCase.errInfo {
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
} }
}) } else if validateErr.Error() != testCase.errInfo.Error() {
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
}
} }
} }

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -37,11 +37,6 @@ func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
			(*out)[key] = val
		}
	}
-	if in.EvictionModes != nil {
-		in, out := &in.EvictionModes, &out.EvictionModes
-		*out = make([]EvictionMode, len(*in))
-		copy(*out, *in)
-	}
	if in.EvictableNamespaces != nil {
		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
		*out = new(api.Namespaces)
@@ -86,21 +81,11 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
			(*out)[key] = val
		}
	}
-	if in.MetricsUtilization != nil {
-		in, out := &in.MetricsUtilization, &out.MetricsUtilization
-		*out = new(MetricsUtilization)
-		(*in).DeepCopyInto(*out)
-	}
	if in.EvictableNamespaces != nil {
		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
		*out = new(api.Namespaces)
		(*in).DeepCopyInto(*out)
	}
-	if in.EvictionLimits != nil {
-		in, out := &in.EvictionLimits, &out.EvictionLimits
-		*out = new(api.EvictionLimits)
-		(*in).DeepCopyInto(*out)
-	}
	return
}
@@ -121,24 +106,3 @@ func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
	}
	return nil
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricsUtilization) DeepCopyInto(out *MetricsUtilization) {
-	*out = *in
-	if in.Prometheus != nil {
-		in, out := &in.Prometheus, &out.Prometheus
-		*out = new(Prometheus)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsUtilization.
-func (in *MetricsUtilization) DeepCopy() *MetricsUtilization {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricsUtilization)
-	in.DeepCopyInto(out)
-	return out
-}

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2025 The Kubernetes Authors.
+Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

The same copyright-year hunk (2025 → 2024) repeats verbatim in fourteen more autogenerated files in this comparison.

@@ -417,23 +417,18 @@ func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable
		// followed by the highest priority pods with affinity or nodeSelector
		sort.Slice(list, func(i, j int) bool {
			// any non-evictable pods should be considered last (ie, first in the list)
-			evictableI := isEvictable(list[i])
-			evictableJ := isEvictable(list[j])
-			if !evictableI || !evictableJ {
+			if !isEvictable(list[i]) || !isEvictable(list[j]) {
				// false - i is the only non-evictable, so return true to put it first
				// true - j is non-evictable, so return false to put j before i
				// if true and both and non-evictable, order doesn't matter
-				return !(evictableI && !evictableJ)
+				return !(isEvictable(list[i]) && !isEvictable(list[j]))
			}
-			hasSelectorOrAffinityI := hasSelectorOrAffinity(*list[i])
-			hasSelectorOrAffinityJ := hasSelectorOrAffinity(*list[j])
			// if both pods have selectors/affinity, compare them by their priority
-			if hasSelectorOrAffinityI == hasSelectorOrAffinityJ {
-				// Sort by priority in ascending order (lower priority Pods first)
-				return !comparePodsByPriority(list[i], list[j])
+			if hasSelectorOrAffinity(*list[i]) == hasSelectorOrAffinity(*list[j]) {
+				comparePodsByPriority(list[i], list[j])
			}
-			return hasSelectorOrAffinityI && !hasSelectorOrAffinityJ
+			return hasSelectorOrAffinity(*list[i]) && !hasSelectorOrAffinity(*list[j])
		})
		sortedTopologies = append(sortedTopologies, topology{pair: pair, pods: list})
	}

@@ -3,7 +3,6 @@ package removepodsviolatingtopologyspreadconstraint
import (
	"context"
	"fmt"
-	"reflect"
	"sort"
	"testing"
@@ -1477,231 +1476,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
	}
}
func TestSortDomains(t *testing.T) {
tests := []struct {
name string
constraintTopology map[topologyPair][]*v1.Pod
want []topology
}{
{
name: "empty input",
constraintTopology: map[topologyPair][]*v1.Pod{},
want: []topology{},
},
{
name: "single domain with mixed pods",
constraintTopology: map[topologyPair][]*v1.Pod{
{"zone", "a"}: {
{
ObjectMeta: metav1.ObjectMeta{
Name: "non-evictable-pod",
Annotations: map[string]string{"evictable": "false"},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "evictable-with-affinity",
Annotations: map[string]string{"evictable": "true", "hasSelectorOrAffinity": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](10),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "evictable-high-priority",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](15),
},
},
},
},
want: []topology{
{pair: topologyPair{"zone", "a"}, pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "non-evictable-pod",
Annotations: map[string]string{"evictable": "false"},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "evictable-with-affinity",
Annotations: map[string]string{"evictable": "true", "hasSelectorOrAffinity": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](10),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "evictable-high-priority",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](15),
},
},
}},
},
},
{
name: "multiple domains with different priorities and selectors",
constraintTopology: map[topologyPair][]*v1.Pod{
{"zone", "a"}: {
{
ObjectMeta: metav1.ObjectMeta{
Name: "high-priority-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](20),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "low-priority-no-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](5),
},
},
},
{"zone", "b"}: {
{
ObjectMeta: metav1.ObjectMeta{
Name: "medium-priority-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](15),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "non-evictable-pod",
Annotations: map[string]string{"evictable": "false"},
},
},
},
},
want: []topology{
{pair: topologyPair{"zone", "a"}, pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "low-priority-no-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](5),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "high-priority-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](20),
},
},
}},
{pair: topologyPair{"zone", "b"}, pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "non-evictable-pod",
Annotations: map[string]string{"evictable": "false"},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "medium-priority-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](15),
},
},
}},
},
},
{
name: "domain with pods having different selector/affinity",
constraintTopology: map[topologyPair][]*v1.Pod{
{"zone", "a"}: {
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{},
},
Priority: utilptr.To[int32](10),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-no-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](15),
},
},
},
},
want: []topology{
{pair: topologyPair{"zone", "a"}, pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-with-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{},
},
Priority: utilptr.To[int32](10),
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-no-affinity",
Annotations: map[string]string{"evictable": "true"},
},
Spec: v1.PodSpec{
Priority: utilptr.To[int32](15),
},
},
}},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mockIsEvictable := func(pod *v1.Pod) bool {
if val, exists := pod.Annotations["evictable"]; exists {
return val == "true"
}
return false
}
got := sortDomains(tt.constraintTopology, mockIsEvictable)
sort.Slice(got, func(i, j int) bool {
return got[i].pair.value < got[j].pair.value
})
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("sortDomains() = %v, want %v", got, tt.want)
}
})
}
}
type testPodList struct {
	count int
	node  string

Some files were not shown because too many files have changed in this diff.