mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits
76 commits:
045fbb6a04
035849c721
a3ca3093e5
ee376e12ac
4750dc19e6
6bf75bedef
2b450c15b8
2a27d1be90
6e1832b1af
a249c9baf0
6e9622bf41
f66804396e
3a5c651136
68207da9c8
35a7178df6
cca28f7bbe
38b1f5c1a8
c2ed7eb575
17b90969cf
2cce60dc2b
b4b203cc60
59d1d5d1b9
98e6ed6587
4548723dea
7542cac9d0
8871272d35
a31a3b5e85
9b9ae9a3be
c22d773200
9ba0f9b410
95a631f6a5
89535b9b9b
54d0a22ad1
87ba84b2ad
b300faece0
04ebdbee32
bda785f7dc
6ab73d6ac5
e283c31030
aed345994f
be4abe1727
e14b86eb8c
a4d6119bcd
57bb31de78
17d9b152a2
b935c7d82c
b8e3c0bba3
5bf11813e6
d883c8a9e1
50dd3b8971
fd9f2b4614
655ab516c7
0d5301ead2
d97f1c9057
57a04aae9f
f3abaf48ae
88af72b907
fa3fb4e954
d5b609b34a
9c6604fc51
1a49e116df
4775db9e2f
52d5d6398c
6100c914b4
0b3c022c32
335c698b38
e39ae80628
3440abfa41
e6d0caa1bc
e085610bfd
03246d6843
5a201a32d9
fc484030b9
c02c889734
ca7afd60b9
7369b1291e
.github/workflows/manifests.yaml (6 changed lines, vendored)

@@ -7,10 +7,11 @@ jobs:
   deploy:
     strategy:
       matrix:
-        k8s-version: ["v1.32.0"]
-        descheduler-version: ["v0.32.1"]
+        k8s-version: ["v1.33.0"]
+        descheduler-version: ["v0.33.0"]
         descheduler-api: ["v1alpha2"]
+        manifest: ["deployment"]
         kind-version: ["v0.27.0"] # keep in sync with test/run-e2e-tests.sh
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
@@ -21,6 +22,7 @@ jobs:
           node_image: kindest/node:${{ matrix.k8s-version }}
           kubectl_version: ${{ matrix.k8s-version }}
+          config: test/kind-config.yaml
           version: ${{ matrix.kind-version }}
       - uses: actions/setup-go@v5
         with:
           go-version-file: go.mod
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM golang:1.23.3
+FROM golang:1.24.2

 WORKDIR /go/src/sigs.k8s.io/descheduler
 COPY . .
@@ -21,7 +21,7 @@ RUN VERSION=${VERSION} make build.$ARCH

 FROM scratch

-MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
+MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>

 LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler

@@ -13,7 +13,7 @@
 # limitations under the License.
 FROM scratch

-MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
+MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>

 USER 1000
Makefile (2 changed lines)

@@ -26,7 +26,7 @@ ARCHS = amd64 arm arm64

 LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

-GOLANGCI_VERSION := v1.62.2
+GOLANGCI_VERSION := v1.64.8
 HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

 GOFUMPT_VERSION := v0.7.0
OWNERS (2 changed lines)

@@ -13,6 +13,8 @@ reviewers:
   - janeliul
   - knelasevero
   - jklaw90
+  - googs1025
+  - ricardomaraschini
 emeritus_approvers:
   - aveshagarwal
   - k82cn
README.md (112 changed lines)

@@ -33,15 +33,15 @@ but relies on the default scheduler for that.
 ## ⚠️ Documentation Versions by Release

 If you are using a published release of Descheduler (such as
-`registry.k8s.io/descheduler/descheduler:v0.31.0`), follow the documentation in
+`registry.k8s.io/descheduler/descheduler:v0.33.0`), follow the documentation in
 that version's release branch, as listed below:

 |Descheduler Version|Docs link|
 |---|---|
+|v0.33.x|[`release-1.33`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.33/README.md)|
 |v0.32.x|[`release-1.32`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.32/README.md)|
 |v0.31.x|[`release-1.31`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.31/README.md)|
 |v0.30.x|[`release-1.30`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.30/README.md)|
 |v0.29.x|[`release-1.29`](https://github.com/kubernetes-sigs/descheduler/blob/release-1.29/README.md)|

 The
 [`master`](https://github.com/kubernetes-sigs/descheduler/blob/master/README.md)
@@ -93,17 +93,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku

 Run As A Job
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.32' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=release-1.33' | kubectl apply -f -
 ```

 Run As A CronJob
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.32' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=release-1.33' | kubectl apply -f -
 ```

 Run As A Deployment
 ```
-kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.32' | kubectl apply -f -
+kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=release-1.33' | kubectl apply -f -
 ```

 ## User Guide
@@ -118,14 +118,31 @@ The Descheduler Policy is configurable and includes default strategy plugins tha

 These are top level keys in the Descheduler Policy that you can use to configure all evictions.

-| Name |type| Default Value | Description |
-|------|----|---------------|-------------|
-| `nodeSelector` |`string`| `nil` | limiting the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point |
-| `maxNoOfPodsToEvictPerNode` |`int`| `nil` | maximum number of pods evicted from each node (summed through all strategies) |
-| `maxNoOfPodsToEvictPerNamespace` |`int`| `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
-| `maxNoOfPodsToEvictTotal` |`int`| `nil` | maximum number of pods evicted per rescheduling cycle (summed through all strategies) |
-| `metricsCollector` |`object`| `nil` | configures collection of metrics for actual resource utilization |
-| `metricsCollector.enabled` |`bool`| `false` | enables kubernetes [metrics server](https://kubernetes-sigs.github.io/metrics-server/) collection |
+| Name | type | Default Value | Description |
+|------|------|---------------|-------------|
+| `nodeSelector` | `string` | `nil` | Limits the nodes which are processed. Only used when `nodeFit`=`true` and only by the PreEvictionFilter Extension Point. |
+| `maxNoOfPodsToEvictPerNode` | `int` | `nil` | Maximum number of pods evicted from each node (summed through all strategies). |
+| `maxNoOfPodsToEvictPerNamespace` | `int` | `nil` | Maximum number of pods evicted from each namespace (summed through all strategies). |
+| `maxNoOfPodsToEvictTotal` | `int` | `nil` | Maximum number of pods evicted per rescheduling cycle (summed through all strategies). |
+| `metricsCollector` (deprecated) | `object` | `nil` | Configures collection of metrics for actual resource utilization. |
+| `metricsCollector.enabled` | `bool` | `false` | Enables Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/) collection. |
+| `metricsProviders` | `[]object` | `nil` | Enables various metrics providers like Kubernetes [Metrics Server](https://kubernetes-sigs.github.io/metrics-server/). |
+| `evictionFailureEventNotification` | `bool` | `false` | Enables eviction failure event notification. |
+| `gracePeriodSeconds` | `int` | `0` | The duration in seconds before the object should be deleted. The value zero indicates delete immediately. |
+| `prometheus` | `object` | `nil` | Configures collection of Prometheus metrics for actual resource utilization. |
+| `prometheus.url` | `string` | `nil` | Points to a Prometheus server URL. |
+| `prometheus.authToken` | `object` | `nil` | Sets the Prometheus server authentication token. If not specified, the in-cluster authentication token is read from the container's file system. |
+| `prometheus.authToken.secretReference` | `object` | `nil` | Reads the authentication token from a Kubernetes secret (the secret is expected to contain the token under the `prometheusAuthToken` data key). |
+| `prometheus.authToken.secretReference.namespace` | `string` | `nil` | Authentication token Kubernetes secret namespace (currently, the RBAC configuration permits retrieving secrets from the `kube-system` namespace; if the secret needs to be accessed from a different namespace, the existing RBAC rules must be explicitly extended). |
+| `prometheus.authToken.secretReference.name` | `string` | `nil` | Authentication token Kubernetes secret name. |
+
+The descheduler currently allows configuring collection of Kubernetes metrics through the `metricsProviders` field.
+The previous way of setting the `metricsCollector` field is deprecated. There are currently two sources to configure:
+- `KubernetesMetrics`: enables metrics collection from the Kubernetes Metrics Server
+- `Prometheus`: enables metrics collection from a Prometheus server
+
+In general, each plugin can consume metrics from a different provider, so multiple distinct providers can be configured in parallel.
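To make the parallel-provider setup concrete, here is a minimal policy sketch assembled from the fields documented above; the Prometheus service URL and the grace period value are placeholders, not recommendations from the repo.

```yaml
# Sketch: both metrics sources enabled side by side; the URL is a placeholder.
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
gracePeriodSeconds: 60
metricsProviders:
- source: KubernetesMetrics
- source: Prometheus
  prometheus:
    url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
```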
 ### Evictor Plugin configuration (Default Evictor)

@@ -135,6 +152,7 @@ The Default Evictor Plugin is used by default for filtering pods before processi
 |---------------------------|----|---------------|-----------------------------------------------------------------------------------------------------------------------------|
 | `nodeSelector` |`string`| `nil` | limiting the nodes which are processed |
 | `evictLocalStoragePods` |`bool`| `false` | allows eviction of pods with local storage |
+| `evictDaemonSetPods` |`bool`| `false` | allows eviction of DaemonSet managed Pods |
 | `evictSystemCriticalPods` |`bool`| `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
 | `ignorePvcPods` |`bool`| `false` | set whether PVC pods should be evicted or ignored |
 | `evictFailedBarePods` |`bool`| `false` | allow eviction of pods without owner references and in failed phase |
@@ -160,8 +178,16 @@ nodeSelector: "node=node1" # you don't need to set this, if not set all will be
 maxNoOfPodsToEvictPerNode: 5000 # you don't need to set this, unlimited if not set
 maxNoOfPodsToEvictPerNamespace: 5000 # you don't need to set this, unlimited if not set
 maxNoOfPodsToEvictTotal: 5000 # you don't need to set this, unlimited if not set
-metricsCollector:
-  enabled: true # you don't need to set this, metrics are not collected if not set
+gracePeriodSeconds: 60 # you don't need to set this, 0 if not set
+# you don't need to set this, metrics are not collected if not set
+metricsProviders:
+- source: Prometheus
+  prometheus:
+    url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local
+    authToken:
+      secretReference:
+        namespace: "kube-system"
+        name: "authtoken"
 profiles:
   - name: ProfileName
     pluginConfig:
@@ -285,9 +311,14 @@ A resource consumption above (resp. below) this window is considered as overutil
 This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
 design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
 like `kubectl top`) may differ from the calculated consumption, due to these components reporting
-actual usage metrics. Metrics-based descheduling can be enabled by setting `metricsUtilization.metricsServer` field.
-In order to have the plugin consume the metrics the metric collector needs to be configured as well.
-See `metricsCollector` field at [Top Level configuration](#top-level-configuration) for available options.
+actual usage metrics. Metrics-based descheduling can be enabled by setting the `metricsUtilization.metricsServer` field (deprecated)
+or the `metricsUtilization.source` field to `KubernetesMetrics`.
+In order to have the plugin consume the metrics, the metric provider needs to be configured as well.
+Alternatively, it is possible to create a Prometheus client and configure a Prometheus query to consume
+metrics outside of the Kubernetes Metrics Server. The query is expected to return a vector of values for
+each node. The values are expected to be any real number within the <0; 1> interval. During eviction, at most
+a single pod is evicted from each overutilized node. There's currently no support for evicting more.
+See the `metricsProviders` field at [Top Level configuration](#top-level-configuration) for available options.

 **Parameters:**
@@ -297,9 +328,12 @@ See `metricsCollector` field at [Top Level configurati
 |`thresholds`|map(string:int)|
 |`targetThresholds`|map(string:int)|
 |`numberOfNodes`|int|
+|`evictionLimits`|object|
 |`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|
 |`metricsUtilization`|object|
-|`metricsUtilization.metricsServer`|bool|
+|`metricsUtilization.metricsServer` (deprecated)|bool|
+|`metricsUtilization.source`|string|
+|`metricsUtilization.prometheus.query`|string|

 **Example:**
@@ -320,8 +354,12 @@ profiles:
             "cpu" : 50
             "memory": 50
             "pods": 50
-          metricsUtilization:
-            metricsServer: true
+          # metricsUtilization:
+          #   source: Prometheus
+          #   prometheus:
+          #     query: instance:node_cpu:rate:sum
+          evictionLimits:
+            node: 5
     plugins:
       balance:
         enabled:
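To show the Prometheus path end to end, here is a hedged sketch pairing the top-level `metricsProviders` entry with the plugin-level `metricsUtilization` settings; the URL and query are placeholders, and how thresholds interact with a Prometheus-sourced metric may vary by release, so treat this as a shape sketch rather than an authoritative configuration.

```yaml
# Sketch: a top-level Prometheus provider consumed by LowNodeUtilization.
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
metricsProviders:
- source: Prometheus
  prometheus:
    url: http://prometheus-kube-prometheus-prometheus.prom.svc.cluster.local  # placeholder
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "LowNodeUtilization"
        args:
          thresholds:
            "cpu": 20
          targetThresholds:
            "cpu": 50
          metricsUtilization:
            source: Prometheus
            prometheus:
              # placeholder query; expected to return one value in <0; 1> per node
              query: instance:node_cpu:rate:sum
    plugins:
      balance:
        enabled:
          - "LowNodeUtilization"
```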
@@ -337,10 +375,12 @@ and will not be used to compute node's usage if it's not specified in `threshold
 * The valid range of the resource's percentage value is \[0, 100\]
 * Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.

-There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
-This parameter can be configured to activate the strategy only when the number of under utilized nodes
+There are two more parameters associated with the `LowNodeUtilization` strategy, called `numberOfNodes` and `evictionLimits`.
+The first parameter can be configured to activate the strategy only when the number of under utilized nodes
 are above the configured value. This could be helpful in large clusters where a few nodes could go
 under utilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
+The second parameter is useful when the number of evictions per plugin per descheduling cycle needs to be limited.
+The parameter currently makes it possible to limit the number of evictions per node through the `node` field.
 ### HighNodeUtilization

@@ -366,6 +406,12 @@ strategy evicts pods from `underutilized nodes` (those with usage below `thresho
 so that they can be recreated in appropriately utilized nodes.
 The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.

+To control pod eviction from underutilized nodes, use the `evictionModes`
+array. A lenient policy, which evicts pods regardless of their resource
+requests, is the default. To enable a stricter policy that only evicts pods
+with resource requests defined for the provided threshold resources, add the
+option `OnlyThresholdingResources` to the `evictionModes` configuration.
+
 **NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
 This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
 design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands

@@ -378,8 +424,15 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
 |---|---|
 |`thresholds`|map(string:int)|
 |`numberOfNodes`|int|
+|`evictionModes`|list(string)|
 |`evictableNamespaces`|(see [namespace filtering](#namespace-filtering))|

+**Supported Eviction Modes:**
+
+|Name|Description|
+|---|---|
+|`OnlyThresholdingResources`|Evict only pods that have resource requests defined for the provided threshold resources.|
+
 **Example:**

 ```yaml
@@ -398,6 +451,8 @@ profiles:
           exclude:
           - "kube-system"
           - "namespace1"
+        evictionModes:
+        - "OnlyThresholdingResources"
     plugins:
       balance:
         enabled:
@@ -990,10 +1045,12 @@ To get best results from HA mode some additional configurations might require:

 ## Metrics

-| name | type | description |
-|-------|-------|----------------|
-| build_info | gauge | constant 1 |
-| pods_evicted | CounterVec | total number of pods evicted |
+| name | type | description |
+|---------------------------------------|--------------|-----------------------------------------------------------------------------------|
+| build_info | gauge | constant 1 |
+| pods_evicted | CounterVec | total number of pods evicted |
+| descheduler_loop_duration_seconds | HistogramVec | time taken to complete a whole descheduling cycle (supports _bucket, _sum, _count) |
+| descheduler_strategy_duration_seconds | HistogramVec | time taken to complete each strategy of the descheduling operation (supports _bucket, _sum, _count) |

 The metrics are served through https://localhost:10258/metrics by default.
 The address and port can be changed by setting the `--binding-address` and `--secure-port` flags.
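For scraping this endpoint, a Prometheus scrape job might look like the sketch below; the job name, install namespace, pod label, and TLS handling are assumptions, not taken from the repo.

```yaml
# Sketch: scrape the descheduler metrics endpoint over HTTPS.
scrape_configs:
  - job_name: descheduler
    scheme: https
    tls_config:
      insecure_skip_verify: true  # assumes the default self-signed serving certificate
    kubernetes_sd_configs:
      - role: pod
        namespaces:
          names: ["kube-system"]  # assumed install namespace
    relabel_configs:
      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
        regex: descheduler  # assumed pod label
        action: keep
```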
@@ -1009,6 +1066,7 @@ packages that it is compiled with.

 | Descheduler | Supported Kubernetes Version |
 |-------------|------------------------------|
+| v0.33 | v1.33 |
 | v0.32 | v1.32 |
 | v0.31 | v1.31 |
 | v0.30 | v1.30 |
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: descheduler
-version: 0.32.1
-appVersion: 0.32.1
+version: 0.32.0
+appVersion: 0.32.0
 description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
 keywords:
 - kubernetes
@@ -13,4 +13,4 @@ sources:
 - https://github.com/kubernetes-sigs/descheduler
 maintainers:
 - name: Kubernetes SIG Scheduling
-  email: kubernetes-sig-scheduling@googlegroups.com
+  email: sig-scheduling@kubernetes.io
@@ -11,7 +11,7 @@ helm install my-release --namespace kube-system descheduler/descheduler

 ## Introduction

-This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview what changes descheduler would make without actually going forward with the changes, you can install descheduler in dry run mode by providing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.

 ## Prerequisites
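The same dry run setting can also be expressed in a values file; a sketch assuming the chart forwards entries under its `cmdOptions` map (shown in the values.yaml excerpt later in this diff) as command-line flags:

```yaml
# values-dryrun.yaml (hypothetical file name); pass with `helm install -f values-dryrun.yaml ...`
cmdOptions:
  dry-run: true
```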
@@ -10,3 +10,13 @@ WARNING: You enabled DryRun mode, you can't use Leader Election.
 {{- end}}
 {{- end}}
 {{- end}}
+{{- if .Values.deschedulerPolicy }}
+A DeschedulerPolicy has been applied for you. You can view the policy with:
+
+  kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml
+
+If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
+To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.
+
+  deschedulerPolicy: {}
+{{- end }}
@@ -36,9 +36,13 @@ rules:
     resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
     verbs: ["get", "patch", "delete"]
 {{- end }}
-{{- if and .Values.deschedulerPolicy .Values.deschedulerPolicy.metricsCollector .Values.deschedulerPolicy.metricsCollector.enabled }}
+{{- if and .Values.deschedulerPolicy }}
+{{- range .Values.deschedulerPolicy.metricsProviders }}
+{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
   - apiGroups: ["metrics.k8s.io"]
     resources: ["pods", "nodes"]
     verbs: ["get", "list"]
 {{- end }}
+{{- end }}
+{{- end }}
 {{- end -}}
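For reference, a sketch of the chart values that would satisfy the new condition and therefore render the `metrics.k8s.io` rule; the key names follow the values.yaml excerpt later in this diff.

```yaml
# Sketch: enabling the KubernetesMetrics provider via chart values
deschedulerPolicy:
  metricsProviders:
    - source: KubernetesMetrics
```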
@@ -59,7 +59,9 @@ spec:
             - {{ printf "--%s" $key }}
           {{- end }}
           {{- end }}
+          {{- if .Values.leaderElection.enabled }}
           {{- include "descheduler.leaderElection" . | nindent 12 }}
+          {{- end }}
           ports:
           {{- toYaml .Values.ports | nindent 12 }}
           livenessProbe:
@@ -89,15 +89,12 @@ cmdOptions:
 deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

 # deschedulerPolicy contains the policies the descheduler will execute.
 # To use policies stored in an existing configMap use:
 # NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
 # deschedulerPolicy: {}
 deschedulerPolicy:
   # nodeSelector: "key1=value1,key2=value2"
   # maxNoOfPodsToEvictPerNode: 10
   # maxNoOfPodsToEvictPerNamespace: 10
-  # metricsCollector:
-  #   enabled: true
+  # metricsProviders:
+  # - source: KubernetesMetrics
   # ignorePvcPods: true
   # evictLocalStoragePods: true
   # evictDaemonSetPods: true
@@ -21,6 +21,7 @@ import (
 	"strings"
 	"time"

+	promapi "github.com/prometheus/client_golang/api"
 	"github.com/spf13/pflag"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -54,6 +55,7 @@ type DeschedulerServer struct {
 	Client            clientset.Interface
 	EventClient       clientset.Interface
 	MetricsClient     metricsclient.Interface
+	PrometheusClient  promapi.Client
 	SecureServing     *apiserveroptions.SecureServingOptionsWithLoopback
 	SecureServingInfo *apiserver.SecureServingInfo
 	DisableMetrics    bool
@@ -26,7 +26,7 @@ When the above pre-release steps are complete and the release is ready to be cut
 3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
 4. Cut release branch from `master`, eg `release-1.24`
 5. Publish release using Github's release process from the git tag you created
-6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+6. Email `sig-scheduling@kubernetes.io` to announce the release

 **Patch release**
 1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
@@ -34,7 +34,7 @@ When the above pre-release steps are complete and the release is ready to be cut
 3. Merge Helm chart version update to release branch
 4. Perform the image promotion process for the patch version
 5. Publish release using Github's release process from the git tag you created
-6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
+6. Email `sig-scheduling@kubernetes.io` to announce the release

 ### Flowchart
@@ -4,7 +4,7 @@ Starting with descheduler release v0.10.0 container images are available in the

 Descheduler Version | Container Image | Architectures |
 ------------------- |-------------------------------------------------|-------------------------|
-v0.32.1 | registry.k8s.io/descheduler/descheduler:v0.32.1 | AMD64<br>ARM64<br>ARMv7 |
+v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
 v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
go.mod (108 changed lines)

@@ -1,28 +1,30 @@
 module sigs.k8s.io/descheduler

-go 1.23.3
+go 1.24.2

 require (
 	github.com/client9/misspell v0.3.4
-	github.com/google/go-cmp v0.6.0
+	github.com/google/go-cmp v0.7.0
+	github.com/prometheus/client_golang v1.22.0
+	github.com/prometheus/common v0.62.0
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
-	go.opentelemetry.io/otel v1.28.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
-	go.opentelemetry.io/otel/sdk v1.28.0
-	go.opentelemetry.io/otel/trace v1.28.0
-	google.golang.org/grpc v1.65.0
-	k8s.io/api v0.32.0
-	k8s.io/apimachinery v0.32.0
-	k8s.io/apiserver v0.32.0
-	k8s.io/client-go v0.32.0
-	k8s.io/code-generator v0.32.0
-	k8s.io/component-base v0.32.0
-	k8s.io/component-helpers v0.32.0
+	go.opentelemetry.io/otel v1.33.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+	go.opentelemetry.io/otel/sdk v1.33.0
+	go.opentelemetry.io/otel/trace v1.33.0
+	google.golang.org/grpc v1.68.1
+	k8s.io/api v0.33.0
+	k8s.io/apimachinery v0.33.0
+	k8s.io/apiserver v0.33.0
+	k8s.io/client-go v0.33.0
+	k8s.io/code-generator v0.33.0
+	k8s.io/component-base v0.33.0
+	k8s.io/component-helpers v0.33.0
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/metrics v0.32.0
-	k8s.io/utils v0.0.0-20241210054802-24370beab758
+	k8s.io/metrics v0.33.0
+	k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
 	kubevirt.io/api v1.3.0
 	kubevirt.io/client-go v1.3.0
 	kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
@@ -30,8 +32,10 @@ require (
 	sigs.k8s.io/yaml v1.4.0
 )

+require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+
 require (
-	cel.dev/expr v0.18.0 // indirect
+	cel.dev/expr v0.19.1 // indirect
 	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
@@ -58,74 +62,72 @@ require (
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/glog v1.2.1 // indirect
+	github.com/golang/glog v1.2.4 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
-	github.com/google/btree v1.0.1 // indirect
-	github.com/google/cel-go v0.22.0 // indirect
-	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/btree v1.1.3 // indirect
+	github.com/google/cel-go v0.23.2 // indirect
+	github.com/google/gnostic-models v0.6.9 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/openshift/custom-resource-status v1.1.2 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/prometheus/client_golang v1.19.1 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.16 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel/metric v1.28.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.21 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.21 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+	go.opentelemetry.io/otel/metric v1.33.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
-	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+	golang.org/x/crypto v0.36.0 // indirect
 	golang.org/x/mod v0.21.0 // indirect
-	golang.org/x/net v0.30.0 // indirect
-	golang.org/x/oauth2 v0.23.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
+	golang.org/x/net v0.38.0 // indirect
+	golang.org/x/oauth2 v0.27.0 // indirect
+	golang.org/x/sync v0.12.0 // indirect
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0 // indirect
+	golang.org/x/time v0.9.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
-	google.golang.org/protobuf v1.35.1 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.30.0 // indirect
-	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
-	k8s.io/kms v0.32.0 // indirect
+	k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
+	k8s.io/kms v0.33.0 // indirect
 	k8s.io/kube-openapi v0.30.0 // indirect
 	kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 )

 replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f

-replace golang.org/x/net => golang.org/x/net v0.33.0
-
-replace golang.org/x/crypto => golang.org/x/crypto v0.31.0
310
go.sum
310
go.sum
@@ -1,5 +1,5 @@
|
||||
cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
|
||||
cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4=
|
||||
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
@@ -108,11 +108,11 @@ github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/K
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
|
||||
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
|
||||
github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -130,12 +130,13 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 h1:oKYOfNR7Hp6XpZ4JqolL5u642Js5Z0n7psPVl+S5heo=
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7/go.mod h1:aii0r/K0ZnHv7G0KF7xy1v0A7s2Ljrb5byB7MO5p6TU=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
|
||||
github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
|
||||
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
|
||||
github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
|
||||
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -143,8 +144,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
@@ -158,16 +160,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
@@ -177,11 +179,15 @@ github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
@@ -191,6 +197,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
@@ -209,6 +217,8 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
@@ -269,17 +279,17 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
@@ -297,6 +307,8 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@@ -305,8 +317,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
@@ -320,38 +332,40 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0=
|
||||
go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
|
||||
go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE=
|
||||
go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
|
||||
go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
|
||||
go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
|
||||
go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA=
go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8=
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk=
go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU=
go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk=
go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs=
go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU=
go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -359,8 +373,22 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/dl v0.0.0-20190829154251-82a15e2f2ead/go.mod h1:IUMfjQLJQd4UTqG1Z90tenwKoCX93Gn3MAQJMOSBsDQ=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -384,11 +412,49 @@ golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -402,18 +468,24 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -422,6 +494,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -435,19 +508,38 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
@@ -459,10 +551,10 @@ golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -488,7 +580,6 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -503,15 +594,15 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -526,8 +617,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -556,29 +647,29 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/apiextensions-apiserver v0.30.0 h1:jcZFKMqnICJfRxTgnC4E+Hpcq8UEhT8B2lhBcQ+6uAs=
k8s.io/apiextensions-apiserver v0.30.0/go.mod h1:N9ogQFGcrbWqAY9p2mUAL5mGxsLqwgtUce127VtRX5Y=
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs=
k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag=
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc=
k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8=
k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98=
k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg=
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU=
k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s=
k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU=
k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM=
k8s.io/component-helpers v0.32.0 h1:pQEEBmRt3pDJJX98cQvZshDgJFeKRM4YtYkMmfOlczw=
k8s.io/component-helpers v0.32.0/go.mod h1:9RuClQatbClcokXOcDWSzFKQm1huIf0FzQlPRpizlMc=
k8s.io/code-generator v0.33.0 h1:B212FVl6EFqNmlgdOZYWNi77yBv+ed3QgQsMR8YQCw4=
k8s.io/code-generator v0.33.0/go.mod h1:KnJRokGxjvbBQkSJkbVuBbu6z4B0rC7ynkpY5Aw6m9o=
k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk=
k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU=
k8s.io/component-helpers v0.33.0 h1:0AdW0A0mIgljLgtG0hJDdJl52PPqTrtMgOgtm/9i/Ys=
k8s.io/component-helpers v0.33.0/go.mod h1:9SRiXfLldPw9lEEuSsapMtvT8j/h1JyFFapbtybwKvU=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4=
k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
@@ -587,16 +678,16 @@ k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kms v0.32.0 h1:jwOfunHIrcdYl5FRcA+uUKKtg6qiqoPCwmS2T3XTYL4=
k8s.io/kms v0.32.0/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
k8s.io/kms v0.33.0 h1:fhQSW/vyaWDhMp0vDuO/sLg2RlGZf4F77beSXcB4/eE=
k8s.io/kms v0.33.0/go.mod h1:C1I8mjFFBNzfUZXYt9FZVJ8MJl7ynFbGgZFbBzkBJ3E=
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
k8s.io/metrics v0.32.0 h1:70qJ3ZS/9DrtH0UA0NVBI6gW2ip2GAn9e7NtoKERpns=
k8s.io/metrics v0.32.0/go.mod h1:skdg9pDjVjCPIQqmc5rBzDL4noY64ORhKu9KCPv1+QI=
k8s.io/metrics v0.33.0 h1:sKe5sC9qb1RakMhs8LWYNuN2ne6OTCWexj8Jos3rO2Y=
k8s.io/metrics v0.33.0/go.mod h1:XewckTFXmE2AJiP7PT3EXaY7hi7bler3t2ZLyOdQYzU=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kubevirt.io/api v1.3.0 h1:9sGElMmnRU50pGED+MPPD2OwQl4S5lvjCUjm+t0mI90=
kubevirt.io/api v1.3.0/go.mod h1:e6LkElYZZm8NcP2gKlFVHZS9pgNhIARHIjSBSfeiP1s=
kubevirt.io/client-go v1.3.0 h1:/HKn4exzwsctEVTwVtEFaeT9D2v4TgWr2SmxITVEZ/4=
@@ -605,18 +696,21 @@ kubevirt.io/containerized-data-importer-api v1.60.1 h1:chmxuINvA7TPmIe8LpShCoKPx
kubevirt.io/containerized-data-importer-api v1.60.1/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs=
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc=
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/mdtoc v1.1.0 h1:q3YtqYzmC2e0hgLXRIOm7/QLuPux1CX3ZHCwlbABxZo=
sigs.k8s.io/mdtoc v1.1.0/go.mod h1:QZLVEdHH2iNIR4uHAZyvFRtjloHgVItk8lo/mzCtq3w=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
@@ -19,7 +19,7 @@
go::verify_version() {
  GO_VERSION=($(go version))

  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.21|go1.22|go1.23') ]]; then
  if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then
    echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
    exit 1
  fi
@@ -36,6 +36,15 @@ rules:
  resources: ["nodes", "pods"]
  verbs: ["get", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-role
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
@@ -54,3 +63,16 @@ subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: descheduler-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: descheduler-role
subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
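The Role/RoleBinding added above exist so the descheduler's service account can read the authentication-token Secret referenced by the new Prometheus metrics provider introduced later in this diff. As a hedged sketch of the kind of access this grants (the Secret name "prometheus-auth-token" is a hypothetical placeholder, not something defined by this commit):

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

func main() {
    // In-cluster config: this is how a pod running as descheduler-sa authenticates.
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)
    // The "get" verb granted by descheduler-role is what permits this call.
    sec, err := client.CoreV1().Secrets("kube-system").Get(context.TODO(), "prometheus-auth-token", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %d bytes of token data\n", len(sec.Data["token"]))
}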
@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: descheduler
        image: registry.k8s.io/descheduler/descheduler:v0.32.1
        image: registry.k8s.io/descheduler/descheduler:v0.33.0
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume

@@ -19,7 +19,7 @@ spec:
      serviceAccountName: descheduler-sa
      containers:
      - name: descheduler
        image: registry.k8s.io/descheduler/descheduler:v0.32.1
        image: registry.k8s.io/descheduler/descheduler:v0.33.0
        imagePullPolicy: IfNotPresent
        command:
        - "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: descheduler
        image: registry.k8s.io/descheduler/descheduler:v0.32.1
        image: registry.k8s.io/descheduler/descheduler:v0.33.0
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
@@ -18,6 +18,7 @@ package api

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
)
@@ -47,7 +48,17 @@ type DeschedulerPolicy struct {
    EvictionFailureEventNotification *bool

    // MetricsCollector configures collection of metrics about actual resource utilization
    MetricsCollector MetricsCollector
    // Deprecated. Use MetricsProviders field instead.
    MetricsCollector *MetricsCollector

    // MetricsProviders configure collection of metrics about actual resource utilization from various sources
    MetricsProviders []MetricsProvider

    // GracePeriodSeconds is the duration in seconds before the object should be deleted. The value must be a non-negative integer.
    // The value zero indicates delete immediately. If this value is nil, the default grace period for the
    // specified type will be used.
    // Defaults to a per-object value if not specified. Zero means delete immediately.
    GracePeriodSeconds *int64
}

// Namespaces carries a list of included/excluded namespaces
@@ -57,6 +68,12 @@ type Namespaces struct {
    Exclude []string `json:"exclude,omitempty"`
}

// EvictionLimits limits the number of evictions per domain, e.g. node, namespace, total.
type EvictionLimits struct {
    // node restricts the maximum number of evictions per node
    Node *uint `json:"node,omitempty"`
}

type (
    Percentage         float64
    ResourceThresholds map[v1.ResourceName]Percentage
@@ -92,9 +109,54 @@ type PluginSet struct {
    Disabled []string
}

type MetricsSource string

const (
    // KubernetesMetrics enables metrics from a Kubernetes metrics server.
    // Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
    KubernetesMetrics MetricsSource = "KubernetesMetrics"

    // PrometheusMetrics enables metrics from a Prometheus metrics server.
    PrometheusMetrics MetricsSource = "Prometheus"
)

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
    // Enabled metrics collection from kubernetes metrics.
    // Later, the collection can be extended to other providers.
    // Enabled metrics collection from Kubernetes metrics.
    // Deprecated. Use MetricsProvider.Source field instead.
    Enabled bool
}

// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
    // Source enables metrics from a Kubernetes metrics server.
    Source MetricsSource

    // Prometheus enables metrics collection through Prometheus
    Prometheus *Prometheus
}

// ReferencedResourceList is an adaptation of v1.ResourceList with resources as references
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

type Prometheus struct {
    URL string
    // authToken used for authentication with the Prometheus server.
    // If not set, the in-cluster authentication token for the descheduler service
    // account is read from the container's file system.
    AuthToken *AuthToken
}

type AuthToken struct {
    // secretReference references an authentication token.
    // Secrets are expected to be created under the descheduler's namespace.
    SecretReference *SecretReference
}

// SecretReference holds a reference to a Secret
type SecretReference struct {
    // namespace is the namespace of the secret.
    Namespace string
    // name is the name of the secret.
    Name string
}
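To make the shape of the new internal API concrete, here is a sketch (not code from the repo) of a policy that wires up both sources; the URL and secret name are placeholders:

package main

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/api"
)

func main() {
    policy := api.DeschedulerPolicy{
        MetricsProviders: []api.MetricsProvider{
            {
                // Equivalent to the deprecated MetricsCollector{Enabled: true}.
                Source: api.KubernetesMetrics,
            },
            {
                Source: api.PrometheusMetrics,
                Prometheus: &api.Prometheus{
                    URL: "https://prometheus.example.svc:9090", // placeholder
                    AuthToken: &api.AuthToken{
                        SecretReference: &api.SecretReference{
                            Namespace: "kube-system",            // descheduler's namespace
                            Name:      "prometheus-auth-token", // hypothetical secret name
                        },
                    },
                },
            },
        },
    }
    fmt.Printf("configured %d metrics providers\n", len(policy.MetricsProviders))
}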
@@ -43,10 +43,20 @@ type DeschedulerPolicy struct {

    // EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
    // Default is false.
    EvictionFailureEventNotification *bool
    EvictionFailureEventNotification *bool `json:"evictionFailureEventNotification,omitempty"`

    // MetricsCollector configures collection of metrics for actual resource utilization
    MetricsCollector MetricsCollector `json:"metricsCollector,omitempty"`
    // Deprecated. Use MetricsProviders field instead.
    MetricsCollector *MetricsCollector `json:"metricsCollector,omitempty"`

    // MetricsProviders configure collection of metrics about actual resource utilization from various sources
    MetricsProviders []MetricsProvider `json:"metricsProviders,omitempty"`

    // GracePeriodSeconds is the duration in seconds before the object should be deleted. The value must be a non-negative integer.
    // The value zero indicates delete immediately. If this value is nil, the default grace period for the
    // specified type will be used.
    // Defaults to a per-object value if not specified. Zero means delete immediately.
    GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
}

type DeschedulerProfile struct {
@@ -74,9 +84,51 @@ type PluginSet struct {
    Disabled []string `json:"disabled"`
}

type MetricsSource string

const (
    // KubernetesMetrics enables metrics from a Kubernetes metrics server.
    // Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
    KubernetesMetrics MetricsSource = "KubernetesMetrics"

    // PrometheusMetrics enables metrics from a Prometheus metrics server.
    PrometheusMetrics MetricsSource = "Prometheus"
)

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
    // Enabled metrics collection from kubernetes metrics.
    // Later, the collection can be extended to other providers.
    // Enabled metrics collection from Kubernetes metrics server.
    // Deprecated. Use MetricsProvider.Source field instead.
    Enabled bool `json:"enabled,omitempty"`
}

// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
    // Source enables metrics from a Kubernetes metrics server.
    Source MetricsSource `json:"source,omitempty"`

    // Prometheus enables metrics collection through Prometheus
    Prometheus *Prometheus `json:"prometheus,omitempty"`
}

type Prometheus struct {
    URL string `json:"url,omitempty"`
    // authToken used for authentication with the Prometheus server.
    // If not set, the in-cluster authentication token for the descheduler service
    // account is read from the container's file system.
    AuthToken *AuthToken `json:"authToken,omitempty"`
}

type AuthToken struct {
    // secretReference references an authentication token.
    // Secrets are expected to be created under the descheduler's namespace.
    SecretReference *SecretReference `json:"secretReference,omitempty"`
}

// SecretReference holds a reference to a Secret
type SecretReference struct {
    // namespace is the namespace of the secret.
    Namespace string `json:"namespace,omitempty"`
    // name is the name of the secret.
    Name string `json:"name,omitempty"`
}
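Because the v1alpha2 fields carry json tags, the on-disk policy a user writes mirrors those tags. A sketch, with the same placeholder values as above, of round-tripping the struct through sigs.k8s.io/yaml to see the serialized keys:

package main

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
    "sigs.k8s.io/yaml"
)

func main() {
    p := v1alpha2.DeschedulerPolicy{
        MetricsProviders: []v1alpha2.MetricsProvider{{
            Source: v1alpha2.PrometheusMetrics,
            Prometheus: &v1alpha2.Prometheus{
                URL: "https://prometheus.example.svc:9090",
                AuthToken: &v1alpha2.AuthToken{
                    SecretReference: &v1alpha2.SecretReference{
                        Namespace: "kube-system",
                        Name:      "prometheus-auth-token",
                    },
                },
            },
        }},
    }
    out, err := yaml.Marshal(p) // sigs.k8s.io/yaml honors the json tags
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // emits metricsProviders with source: Prometheus, prometheus.url, authToken.secretReference
}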
138
pkg/api/v1alpha2/zz_generated.conversion.go
generated
@@ -36,6 +36,16 @@ func init() {
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
    if err := s.AddGeneratedConversionFunc((*AuthToken)(nil), (*api.AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1alpha2_AuthToken_To_api_AuthToken(a.(*AuthToken), b.(*api.AuthToken), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.AuthToken)(nil), (*AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_AuthToken_To_v1alpha2_AuthToken(a.(*api.AuthToken), b.(*AuthToken), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
    }); err != nil {
@@ -56,6 +66,16 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*MetricsProvider)(nil), (*api.MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(a.(*MetricsProvider), b.(*api.MetricsProvider), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.MetricsProvider)(nil), (*MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(a.(*api.MetricsProvider), b.(*MetricsProvider), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
    }); err != nil {
@@ -81,6 +101,26 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*Prometheus)(nil), (*api.Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1alpha2_Prometheus_To_api_Prometheus(a.(*Prometheus), b.(*api.Prometheus), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.Prometheus)(nil), (*Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_Prometheus_To_v1alpha2_Prometheus(a.(*api.Prometheus), b.(*Prometheus), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*SecretReference)(nil), (*api.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1alpha2_SecretReference_To_api_SecretReference(a.(*SecretReference), b.(*api.SecretReference), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*api.SecretReference)(nil), (*SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_SecretReference_To_v1alpha2_SecretReference(a.(*api.SecretReference), b.(*SecretReference), scope)
    }); err != nil {
        return err
    }
    if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
    }); err != nil {
@@ -99,6 +139,26 @@ func RegisterConversions(s *runtime.Scheme) error {
    return nil
}

func autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
    out.SecretReference = (*api.SecretReference)(unsafe.Pointer(in.SecretReference))
    return nil
}

// Convert_v1alpha2_AuthToken_To_api_AuthToken is an autogenerated conversion function.
func Convert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
    return autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in, out, s)
}

func autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
    out.SecretReference = (*SecretReference)(unsafe.Pointer(in.SecretReference))
    return nil
}

// Convert_api_AuthToken_To_v1alpha2_AuthToken is an autogenerated conversion function.
func Convert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
    return autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in, out, s)
}

func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
    if in.Profiles != nil {
        in, out := &in.Profiles, &out.Profiles
@@ -116,9 +176,9 @@ func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Desched
    out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
    out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
    out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
    if err := Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
        return err
    }
    out.MetricsCollector = (*api.MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
    out.MetricsProviders = *(*[]api.MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
    out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
    return nil
}

@@ -139,9 +199,9 @@ func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.Des
    out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
    out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
    out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
    if err := Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(&in.MetricsCollector, &out.MetricsCollector, s); err != nil {
        return err
    }
    out.MetricsCollector = (*MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
    out.MetricsProviders = *(*[]MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
    out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
    return nil
}

@@ -213,6 +273,28 @@ func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCo
    return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
}

func autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
    out.Source = api.MetricsSource(in.Source)
    out.Prometheus = (*api.Prometheus)(unsafe.Pointer(in.Prometheus))
    return nil
}

// Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider is an autogenerated conversion function.
func Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
    return autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in, out, s)
}

func autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
    out.Source = MetricsSource(in.Source)
    out.Prometheus = (*Prometheus)(unsafe.Pointer(in.Prometheus))
    return nil
}

// Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider is an autogenerated conversion function.
func Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
    return autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in, out, s)
}

func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
    out.Name = in.Name
    if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
@@ -309,3 +391,47 @@ func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins,
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
    return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
}

func autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
    out.URL = in.URL
    out.AuthToken = (*api.AuthToken)(unsafe.Pointer(in.AuthToken))
    return nil
}

// Convert_v1alpha2_Prometheus_To_api_Prometheus is an autogenerated conversion function.
func Convert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
    return autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in, out, s)
}

func autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
    out.URL = in.URL
    out.AuthToken = (*AuthToken)(unsafe.Pointer(in.AuthToken))
    return nil
}

// Convert_api_Prometheus_To_v1alpha2_Prometheus is an autogenerated conversion function.
func Convert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
    return autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in, out, s)
}

func autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
    out.Namespace = in.Namespace
    out.Name = in.Name
    return nil
}

// Convert_v1alpha2_SecretReference_To_api_SecretReference is an autogenerated conversion function.
func Convert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
    return autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in, out, s)
}

func autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
    out.Namespace = in.Namespace
    out.Name = in.Name
    return nil
}

// Convert_api_SecretReference_To_v1alpha2_SecretReference is an autogenerated conversion function.
func Convert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
    return autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in, out, s)
}
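A rough sketch of how these registered functions get exercised; RegisterConversions and the policy types come from this diff, while the scheme plumbing is standard apimachinery (this is illustrative, not a test from the repo):

package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
)

func main() {
    scheme := runtime.NewScheme()
    if err := v1alpha2.RegisterConversions(scheme); err != nil {
        panic(err)
    }
    versioned := &v1alpha2.DeschedulerPolicy{
        MetricsProviders: []v1alpha2.MetricsProvider{{Source: v1alpha2.PrometheusMetrics}},
    }
    internal := &api.DeschedulerPolicy{}
    // Scheme.Convert looks up the generated function registered for this type pair.
    if err := scheme.Convert(versioned, internal, nil); err != nil {
        panic(err)
    }
    // internal.MetricsProviders[0].Source now equals api.PrometheusMetrics.
}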
97
pkg/api/v1alpha2/zz_generated.deepcopy.go
generated
@@ -25,6 +25,27 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
    *out = *in
    if in.SecretReference != nil {
        in, out := &in.SecretReference, &out.SecretReference
        *out = new(SecretReference)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
    if in == nil {
        return nil
    }
    out := new(AuthToken)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
    *out = *in
@@ -61,7 +82,23 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
        *out = new(bool)
        **out = **in
    }
    out.MetricsCollector = in.MetricsCollector
    if in.MetricsCollector != nil {
        in, out := &in.MetricsCollector, &out.MetricsCollector
        *out = new(MetricsCollector)
        **out = **in
    }
    if in.MetricsProviders != nil {
        in, out := &in.MetricsProviders, &out.MetricsProviders
        *out = make([]MetricsProvider, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.GracePeriodSeconds != nil {
        in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
        *out = new(int64)
        **out = **in
    }
    return
}

@@ -123,6 +160,27 @@ func (in *MetricsCollector) DeepCopy() *MetricsCollector {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
    *out = *in
    if in.Prometheus != nil {
        in, out := &in.Prometheus, &out.Prometheus
        *out = new(Prometheus)
        (*in).DeepCopyInto(*out)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
    if in == nil {
        return nil
    }
    out := new(MetricsProvider)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
    *out = *in
@@ -187,3 +245,40 @@ func (in *Plugins) DeepCopy() *Plugins {
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
    *out = *in
    if in.AuthToken != nil {
        in, out := &in.AuthToken, &out.AuthToken
        *out = new(AuthToken)
        (*in).DeepCopyInto(*out)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
    if in == nil {
        return nil
    }
    out := new(Prometheus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
    if in == nil {
        return nil
    }
    out := new(SecretReference)
    in.DeepCopyInto(out)
    return out
}
118	pkg/api/zz_generated.deepcopy.go (generated)
@@ -25,6 +25,27 @@ import (
    runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
    *out = *in
    if in.SecretReference != nil {
        in, out := &in.SecretReference, &out.SecretReference
        *out = new(SecretReference)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
    if in == nil {
        return nil
    }
    out := new(AuthToken)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
    *out = *in
@@ -61,7 +82,23 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
        *out = new(bool)
        **out = **in
    }
    out.MetricsCollector = in.MetricsCollector
    if in.MetricsCollector != nil {
        in, out := &in.MetricsCollector, &out.MetricsCollector
        *out = new(MetricsCollector)
        **out = **in
    }
    if in.MetricsProviders != nil {
        in, out := &in.MetricsProviders, &out.MetricsProviders
        *out = make([]MetricsProvider, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.GracePeriodSeconds != nil {
        in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
        *out = new(int64)
        **out = **in
    }
    return
}

@@ -107,6 +144,27 @@ func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EvictionLimits) DeepCopyInto(out *EvictionLimits) {
    *out = *in
    if in.Node != nil {
        in, out := &in.Node, &out.Node
        *out = new(uint)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvictionLimits.
func (in *EvictionLimits) DeepCopy() *EvictionLimits {
    if in == nil {
        return nil
    }
    out := new(EvictionLimits)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
    *out = *in
@@ -123,6 +181,27 @@ func (in *MetricsCollector) DeepCopy() *MetricsCollector {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
    *out = *in
    if in.Prometheus != nil {
        in, out := &in.Prometheus, &out.Prometheus
        *out = new(Prometheus)
        (*in).DeepCopyInto(*out)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
    if in == nil {
        return nil
    }
    out := new(MetricsProvider)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
    *out = *in
@@ -237,6 +316,27 @@ func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
    *out = *in
    if in.AuthToken != nil {
        in, out := &in.AuthToken, &out.AuthToken
        *out = new(AuthToken)
        (*in).DeepCopyInto(*out)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
    if in == nil {
        return nil
    }
    out := new(Prometheus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
    {
@@ -258,3 +358,19 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
    in.DeepCopyInto(out)
    return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
    if in == nil {
        return nil
    }
    out := new(SecretReference)
    in.DeepCopyInto(out)
    return out
}
@@ -17,17 +17,30 @@ limitations under the License.
package client

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "time"

    promapi "github.com/prometheus/client_golang/api"
    "github.com/prometheus/common/config"

    // Ensure to load all auth plugins.
    clientset "k8s.io/client-go/kubernetes"
    _ "k8s.io/client-go/plugin/pkg/client/auth"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/transport"
    componentbaseconfig "k8s.io/component-base/config"
    metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

var K8sPodCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"

func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
    var cfg *rest.Config
    if len(clientConnection.Kubeconfig) != 0 {
@@ -94,3 +107,61 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
    }
    return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
}

func loadCAFile(filepath string) (*x509.CertPool, error) {
    caCert, err := ioutil.ReadFile(filepath)
    if err != nil {
        return nil, err
    }

    caCertPool := x509.NewCertPool()
    if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
        return nil, fmt.Errorf("failed to append CA certificate to the pool")
    }

    return caCertPool, nil
}

func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *http.Transport, error) {
    // Retrieve Pod CA cert
    caCertPool, err := loadCAFile(K8sPodCAFilePath)
    if err != nil {
        return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
    }

    // Get Prometheus Host
    u, err := url.Parse(prometheusURL)
    if err != nil {
        return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
    }
    t := &http.Transport{
        Proxy: http.ProxyFromEnvironment,
        DialContext: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
        }).DialContext,
        MaxIdleConns:        100,
        IdleConnTimeout:     90 * time.Second,
        TLSHandshakeTimeout: 10 * time.Second,
        TLSClientConfig: &tls.Config{
            RootCAs:    caCertPool,
            ServerName: u.Host,
        },
    }
    roundTripper := transport.NewBearerAuthRoundTripper(
        authToken,
        t,
    )

    if authToken != "" {
        client, err := promapi.NewClient(promapi.Config{
            Address:      prometheusURL,
            RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(authToken), roundTripper),
        })
        return client, t, err
    }
    client, err := promapi.NewClient(promapi.Config{
        Address: prometheusURL,
    })
    return client, t, err
}
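For orientation, here is a minimal sketch of how `CreatePrometheusClient` might be exercised. It assumes the process runs inside a pod (the helper reads the service-account CA bundle from disk), that the helper lives in `sigs.k8s.io/descheduler/pkg/descheduler/client` as in this tree, and the URL and token below are placeholders, not values from the repository:

```go
package main

import (
	"context"
	"fmt"
	"time"

	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"

	"sigs.k8s.io/descheduler/pkg/descheduler/client"
)

func main() {
	// Placeholder endpoint and token; in the descheduler these come from the
	// policy's metrics provider configuration and the reconciled auth secret.
	promClient, transport, err := client.CreatePrometheusClient("https://prometheus.example.svc", "dummy-token")
	if err != nil {
		panic(err)
	}
	defer transport.CloseIdleConnections()

	// Issue a trivial instant query through the standard v1 API wrapper.
	value, warnings, err := promv1.NewAPI(promClient).Query(context.TODO(), "up", time.Now())
	if err != nil {
		panic(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(value)
}
```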
@@ -20,9 +20,12 @@ import (
    "context"
    "fmt"
    "math"
    "net/http"
    "strconv"
    "time"

    promapi "github.com/prometheus/client_golang/api"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/trace"

@@ -30,18 +33,24 @@ import (
    policy "k8s.io/api/policy/v1"
    policyv1 "k8s.io/api/policy/v1"
    schedulingv1 "k8s.io/api/scheduling/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    utilversion "k8s.io/apimachinery/pkg/util/version"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    fakeclientset "k8s.io/client-go/kubernetes/fake"
    corev1listers "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/rest"
    core "k8s.io/client-go/testing"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/events"
    "k8s.io/client-go/util/workqueue"
    componentbaseconfig "k8s.io/component-base/config"
    "k8s.io/klog/v2"

@@ -62,6 +71,11 @@ import (
    "sigs.k8s.io/descheduler/pkg/version"
)

const (
    prometheusAuthTokenSecretKey = "prometheusAuthToken"
    workQueueKey                 = "key"
)

type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status

type profileRunner struct {
@@ -70,15 +84,21 @@ type profileRunner struct {
}

type descheduler struct {
    rs *options.DeschedulerServer
    ir *informerResources
    getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
    sharedInformerFactory informers.SharedInformerFactory
    deschedulerPolicy *api.DeschedulerPolicy
    eventRecorder events.EventRecorder
    podEvictor *evictions.PodEvictor
    podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
    metricsCollector *metricscollector.MetricsCollector
    rs *options.DeschedulerServer
    ir *informerResources
    getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
    sharedInformerFactory informers.SharedInformerFactory
    namespacedSecretsLister corev1listers.SecretNamespaceLister
    deschedulerPolicy *api.DeschedulerPolicy
    eventRecorder events.EventRecorder
    podEvictor *evictions.PodEvictor
    podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
    metricsCollector *metricscollector.MetricsCollector
    prometheusClient promapi.Client
    previousPrometheusClientTransport *http.Transport
    queue workqueue.RateLimitingInterface
    currentPrometheusAuthToken string
    metricsProviders map[api.MetricsSource]*api.MetricsProvider
}

type informerResources struct {
@@ -125,8 +145,15 @@ func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFact
    return nil
}

func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory,
) (*descheduler, error) {
func metricsProviderListToMap(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider {
    providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
    for _, provider := range providersList {
        providersMap[provider.Source] = &provider
    }
    return providersMap
}

func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory, namespacedSharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
    podInformer := sharedInformerFactory.Core().V1().Pods().Informer()

    ir := newInformerResources(sharedInformerFactory)
@@ -157,6 +184,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
        WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
        WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
        WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
        WithGracePeriodSeconds(deschedulerPolicy.GracePeriodSeconds).
        WithDryRun(rs.DryRun).
        WithMetricsEnabled(!rs.DisableMetrics),
    )
@@ -164,20 +192,7 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
        return nil, err
    }

    var metricsCollector *metricscollector.MetricsCollector
    if deschedulerPolicy.MetricsCollector.Enabled {
        nodeSelector := labels.Everything()
        if deschedulerPolicy.NodeSelector != nil {
            sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
            if err != nil {
                return nil, err
            }
            nodeSelector = sel
        }
        metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
    }

    return &descheduler{
    desch := &descheduler{
        rs:                    rs,
        ir:                    ir,
        getPodsAssignedToNode: getPodsAssignedToNode,
@@ -186,8 +201,148 @@ func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedu
        eventRecorder:          eventRecorder,
        podEvictor:             podEvictor,
        podEvictionReactionFnc: podEvictionReactionFnc,
        metricsCollector:       metricsCollector,
    }, nil
        prometheusClient:       rs.PrometheusClient,
        queue:                  workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}),
        metricsProviders:       metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
    }

    if rs.MetricsClient != nil {
        nodeSelector := labels.Everything()
        if deschedulerPolicy.NodeSelector != nil {
            sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
            if err != nil {
                return nil, err
            }
            nodeSelector = sel
        }
        desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
    }

    prometheusProvider := desch.metricsProviders[api.PrometheusMetrics]
    if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.AuthToken != nil {
        authTokenSecret := prometheusProvider.Prometheus.AuthToken.SecretReference
        if authTokenSecret == nil || authTokenSecret.Namespace == "" {
            return nil, fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
        }
        if namespacedSharedInformerFactory == nil {
            return nil, fmt.Errorf("namespacedSharedInformerFactory not configured")
        }
        namespacedSharedInformerFactory.Core().V1().Secrets().Informer().AddEventHandler(desch.eventHandler())
        desch.namespacedSecretsLister = namespacedSharedInformerFactory.Core().V1().Secrets().Lister().Secrets(authTokenSecret.Namespace)
    }

    return desch, nil
}

func (d *descheduler) reconcileInClusterSAToken() error {
    // Read the sa token and assume it has the sufficient permissions to authenticate
    cfg, err := rest.InClusterConfig()
    if err == nil {
        if d.currentPrometheusAuthToken != cfg.BearerToken {
            klog.V(2).Infof("Creating Prometheus client (with SA token)")
            prometheusClient, transport, err := client.CreatePrometheusClient(d.metricsProviders[api.PrometheusMetrics].Prometheus.URL, cfg.BearerToken)
            if err != nil {
                return fmt.Errorf("unable to create a prometheus client: %v", err)
            }
            d.prometheusClient = prometheusClient
            if d.previousPrometheusClientTransport != nil {
                d.previousPrometheusClientTransport.CloseIdleConnections()
            }
            d.previousPrometheusClientTransport = transport
            d.currentPrometheusAuthToken = cfg.BearerToken
        }
        return nil
    }
    if err == rest.ErrNotInCluster {
        return nil
    }
    return fmt.Errorf("unexpected error when reading in cluster config: %v", err)
}

func (d *descheduler) runAuthenticationSecretReconciler(ctx context.Context) {
    defer utilruntime.HandleCrash()
    defer d.queue.ShutDown()

    klog.Infof("Starting authentication secret reconciler")
    defer klog.Infof("Shutting down authentication secret reconciler")

    go wait.UntilWithContext(ctx, d.runAuthenticationSecretReconcilerWorker, time.Second)

    <-ctx.Done()
}

func (d *descheduler) runAuthenticationSecretReconcilerWorker(ctx context.Context) {
    for d.processNextWorkItem(ctx) {
    }
}

func (d *descheduler) processNextWorkItem(ctx context.Context) bool {
    dsKey, quit := d.queue.Get()
    if quit {
        return false
    }
    defer d.queue.Done(dsKey)

    err := d.sync()
    if err == nil {
        d.queue.Forget(dsKey)
        return true
    }

    utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
    d.queue.AddRateLimited(dsKey)

    return true
}

func (d *descheduler) sync() error {
    prometheusConfig := d.metricsProviders[api.PrometheusMetrics].Prometheus
    if prometheusConfig == nil || prometheusConfig.AuthToken == nil || prometheusConfig.AuthToken.SecretReference == nil {
        return fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
    }
    ns := prometheusConfig.AuthToken.SecretReference.Namespace
    name := prometheusConfig.AuthToken.SecretReference.Name
    secretObj, err := d.namespacedSecretsLister.Get(name)
    if err != nil {
        // clear the token if the secret is not found
        if apierrors.IsNotFound(err) {
            d.currentPrometheusAuthToken = ""
            if d.previousPrometheusClientTransport != nil {
                d.previousPrometheusClientTransport.CloseIdleConnections()
            }
            d.previousPrometheusClientTransport = nil
            d.prometheusClient = nil
        }
        return fmt.Errorf("unable to get %v/%v secret", ns, name)
    }
    authToken := string(secretObj.Data[prometheusAuthTokenSecretKey])
    if authToken == "" {
        return fmt.Errorf("prometheus authentication token secret missing %q data or empty", prometheusAuthTokenSecretKey)
    }
    if d.currentPrometheusAuthToken == authToken {
        return nil
    }

    klog.V(2).Infof("authentication secret token updated, recreating prometheus client")
    prometheusClient, transport, err := client.CreatePrometheusClient(prometheusConfig.URL, authToken)
    if err != nil {
        return fmt.Errorf("unable to create a prometheus client: %v", err)
    }
    d.prometheusClient = prometheusClient
    if d.previousPrometheusClientTransport != nil {
        d.previousPrometheusClientTransport.CloseIdleConnections()
    }
    d.previousPrometheusClientTransport = transport
    d.currentPrometheusAuthToken = authToken
    return nil
}

func (d *descheduler) eventHandler() cache.ResourceEventHandler {
    return cache.ResourceEventHandlerFuncs{
        AddFunc:    func(obj interface{}) { d.queue.Add(workQueueKey) },
        UpdateFunc: func(old, new interface{}) { d.queue.Add(workQueueKey) },
        DeleteFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
    }
}

func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
@@ -267,6 +422,7 @@ func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interfac
        frameworkprofile.WithPodEvictor(d.podEvictor),
        frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
        frameworkprofile.WithMetricsCollector(d.metricsCollector),
        frameworkprofile.WithPrometheusClient(d.prometheusClient),
    )
    if err != nil {
        klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
@@ -331,7 +487,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
        return err
    }

    if deschedulerPolicy.MetricsCollector.Enabled {
    if (deschedulerPolicy.MetricsCollector != nil && deschedulerPolicy.MetricsCollector.Enabled) || metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.KubernetesMetrics] != nil {
        metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
        if err != nil {
            return err
@@ -414,6 +570,14 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
    }
}

type tokenReconciliation int

const (
    noReconciliation tokenReconciliation = iota
    inClusterReconciliation
    secretReconciliation
)

func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
    var span trace.Span
    ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
@@ -435,7 +599,22 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
    eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
    defer eventBroadcaster.Shutdown()

    descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
    var namespacedSharedInformerFactory informers.SharedInformerFactory
    metricProviderTokenReconciliation := noReconciliation

    prometheusProvider := metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.PrometheusMetrics]
    if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.URL != "" {
        if prometheusProvider.Prometheus.AuthToken != nil {
            // Will get reconciled
            namespacedSharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields), informers.WithNamespace(prometheusProvider.Prometheus.AuthToken.SecretReference.Namespace))
            metricProviderTokenReconciliation = secretReconciliation
        } else {
            // Use the sa token and assume it has the sufficient permissions to authenticate
            metricProviderTokenReconciliation = inClusterReconciliation
        }
    }

    descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory, namespacedSharedInformerFactory)
    if err != nil {
        span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
        return err
@@ -444,10 +623,17 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
    defer cancel()

    sharedInformerFactory.Start(ctx.Done())
    if metricProviderTokenReconciliation == secretReconciliation {
        namespacedSharedInformerFactory.Start(ctx.Done())
    }

    sharedInformerFactory.WaitForCacheSync(ctx.Done())
    descheduler.podEvictor.WaitForEventHandlersSync(ctx)
    if metricProviderTokenReconciliation == secretReconciliation {
        namespacedSharedInformerFactory.WaitForCacheSync(ctx.Done())
    }

    if deschedulerPolicy.MetricsCollector.Enabled {
    if descheduler.metricsCollector != nil {
        go func() {
            klog.V(2).Infof("Starting metrics collector")
            descheduler.metricsCollector.Run(ctx)
@@ -461,7 +647,19 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
        }
    }

    if metricProviderTokenReconciliation == secretReconciliation {
        go descheduler.runAuthenticationSecretReconciler(ctx)
    }

    wait.NonSlidingUntil(func() {
        if metricProviderTokenReconciliation == inClusterReconciliation {
            // Read the sa token and assume it has the sufficient permissions to authenticate
            if err := descheduler.reconcileInClusterSAToken(); err != nil {
                klog.ErrorS(err, "unable to reconcile an in cluster SA token")
                return
            }
        }

        // A next context is created here intentionally to avoid nesting the spans via context.
        sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
        defer sSpan.End()
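Taken together, these changes let a policy point the descheduler at Prometheus and have the auth token reconciled from a secret. A hypothetical v1alpha2 policy exercising the new fields might look like the sketch below; the YAML key spellings and the `Prometheus` source value are my reading of the Go types above, not copied from the project docs, and the referenced secret would carry its token under the `prometheusAuthToken` data key:

```yaml
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
metricsProviders:
  - source: Prometheus
    prometheus:
      url: https://prometheus-k8s.monitoring.svc   # validation requires an https scheme
      authToken:
        secretReference:          # watched; the client is rebuilt when the token changes
          namespace: monitoring
          name: descheduler-prometheus-token
```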
@@ -136,6 +136,10 @@ func removeDuplicatesPolicy() *api.DeschedulerPolicy {
}

func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
    var metricsSource api.MetricsSource = ""
    if metricsEnabled {
        metricsSource = api.KubernetesMetrics
    }
    return &api.DeschedulerPolicy{
        Profiles: []api.DeschedulerProfile{
            {
@@ -146,8 +150,8 @@ func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThreshold
                Args: &nodeutilization.LowNodeUtilizationArgs{
                    Thresholds:       thresholds,
                    TargetThresholds: targetThresholds,
                    MetricsUtilization: nodeutilization.MetricsUtilization{
                        MetricsServer: metricsEnabled,
                    MetricsUtilization: &nodeutilization.MetricsUtilization{
                        Source: metricsSource,
                    },
                },
            },
@@ -189,7 +193,7 @@ func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate
    sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
    eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)

    descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory)
    descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory, nil)
    if err != nil {
        eventBroadcaster.Shutdown()
        t.Fatalf("Unable to create a descheduler instance: %v", err)
@@ -837,7 +841,7 @@ func TestLoadAwareDescheduling(t *testing.T) {
        },
        true, // enabled metrics utilization
    )
    policy.MetricsCollector.Enabled = true
    policy.MetricsProviders = []api.MetricsProvider{{Source: api.KubernetesMetrics}}

    ctxCancel, cancel := context.WithCancel(ctx)
    _, descheduler, _ := initDescheduler(
@@ -214,6 +214,7 @@ type PodEvictor struct {
    maxPodsToEvictPerNode      *uint
    maxPodsToEvictPerNamespace *uint
    maxPodsToEvictTotal        *uint
    gracePeriodSeconds         *int64
    nodePodCount               nodePodEvictedCount
    namespacePodCount          namespacePodEvictCount
    totalPodCount              uint
@@ -247,6 +248,7 @@ func NewPodEvictor(
        maxPodsToEvictPerNode:      options.maxPodsToEvictPerNode,
        maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
        maxPodsToEvictTotal:        options.maxPodsToEvictTotal,
        gracePeriodSeconds:         options.gracePeriodSeconds,
        metricsEnabled:             options.metricsEnabled,
        nodePodCount:               make(nodePodEvictedCount),
        namespacePodCount:          make(namespacePodEvictCount),
@@ -563,7 +565,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptio

// return (ignore, err)
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
    deleteOptions := &metav1.DeleteOptions{}
    deleteOptions := &metav1.DeleteOptions{
        GracePeriodSeconds: pe.gracePeriodSeconds,
    }
    // GracePeriodSeconds ?
    eviction := &policy.Eviction{
        TypeMeta: metav1.TypeMeta{
@@ -12,6 +12,7 @@ type Options struct {
    maxPodsToEvictTotal              *uint
    evictionFailureEventNotification bool
    metricsEnabled                   bool
    gracePeriodSeconds               *int64
}

// NewOptions returns an Options with default values.
@@ -46,6 +47,11 @@ func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
    return o
}

func (o *Options) WithGracePeriodSeconds(gracePeriodSeconds *int64) *Options {
    o.gracePeriodSeconds = gracePeriodSeconds
    return o
}

func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
    o.metricsEnabled = metricsEnabled
    return o
@@ -32,6 +32,7 @@ import (
    "k8s.io/klog/v2"
    metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
    utilptr "k8s.io/utils/ptr"
    "sigs.k8s.io/descheduler/pkg/api"
)

const (
@@ -43,7 +44,7 @@ type MetricsCollector struct {
    metricsClientset metricsclient.Interface
    nodeSelector     labels.Selector

    nodes map[string]map[v1.ResourceName]*resource.Quantity
    nodes map[string]api.ReferencedResourceList

    mu sync.RWMutex
    // hasSynced signals at least one sync succeeded
@@ -55,7 +56,7 @@ func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset me
        nodeLister:       nodeLister,
        metricsClientset: metricsClientset,
        nodeSelector:     nodeSelector,
        nodes:            make(map[string]map[v1.ResourceName]*resource.Quantity),
        nodes:            make(map[string]api.ReferencedResourceList),
    }
}

@@ -77,13 +78,13 @@ func weightedAverage(prevValue, value int64) int64 {
    return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
}

func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
    mc.mu.RLock()
    defer mc.mu.RUnlock()

    allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
    allNodesUsage := make(map[string]api.ReferencedResourceList)
    for nodeName := range mc.nodes {
        allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
        allNodesUsage[nodeName] = api.ReferencedResourceList{
            v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
            v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
        }
@@ -92,7 +93,7 @@ func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*res
    return allNodesUsage, nil
}

func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
    mc.mu.RLock()
    defer mc.mu.RUnlock()

@@ -100,7 +101,7 @@ func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resou
        klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
        return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
    }
    return map[v1.ResourceName]*resource.Quantity{
    return api.ReferencedResourceList{
        v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
        v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
    }, nil
@@ -131,7 +132,7 @@ func (mc *MetricsCollector) Collect(ctx context.Context) error {
    }

    if _, exists := mc.nodes[node.Name]; !exists {
        mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
        mc.nodes[node.Name] = api.ReferencedResourceList{
            v1.ResourceCPU:    utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
            v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
        }
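The hunks above swap the literal `map[v1.ResourceName]*resource.Quantity` for `api.ReferencedResourceList` everywhere. Judging purely by how the two are substituted one-for-one in this diff, the type would be consistent with a simple alias along these lines (an inference, not the repository's definition):

```go
package api

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ReferencedResourceList, as inferred from the substitutions in this diff:
// a map from resource name to a pointer to its quantity. The authoritative
// definition lives in sigs.k8s.io/descheduler/pkg/api.
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
```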
@@ -29,10 +29,11 @@ import (
    fakeclientset "k8s.io/client-go/kubernetes/fake"
    fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"

    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/test"
)

func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
    t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
    if usage[v1.ResourceCPU].MilliValue() != millicpu {
        t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
@@ -30,6 +30,7 @@ import (
    listersv1 "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/klog/v2"
    "sigs.k8s.io/descheduler/pkg/api"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    "sigs.k8s.io/descheduler/pkg/utils"
)
@@ -213,7 +214,7 @@ func IsNodeUnschedulable(node *v1.Node) bool {
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
    // Get pod requests
    podRequests, _ := utils.PodRequestsAndLimits(pod)
    resourceNames := make([]v1.ResourceName, 0, len(podRequests))
    resourceNames := []v1.ResourceName{v1.ResourcePods}
    for name := range podRequests {
        resourceNames = append(resourceNames, name)
    }
@@ -236,7 +237,7 @@
        }
    }
    // check pod count; at least one pod slot must be available
    if availableResources[v1.ResourcePods].MilliValue() <= 0 {
    if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
        return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
    }

@@ -244,7 +245,7 @@
}

// nodeAvailableResources returns resources mapped to the quantity available on the node.
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
    podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
    if err != nil {
        return nil, err
@@ -253,13 +254,18 @@
    if err != nil {
        return nil, err
    }
    remainingResources := map[v1.ResourceName]*resource.Quantity{
        v1.ResourceCPU:    resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
        v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
        v1.ResourcePods:   resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
    }
    remainingResources := api.ReferencedResourceList{}
    for _, name := range resourceNames {
        if !IsBasicResource(name) {
        if IsBasicResource(name) {
            switch name {
            case v1.ResourceCPU:
                remainingResources[name] = resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI)
            case v1.ResourceMemory:
                remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI)
            case v1.ResourcePods:
                remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI)
            }
        } else {
            if _, exists := node.Status.Allocatable[name]; exists {
                allocatableResource := node.Status.Allocatable[name]
                remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
@@ -273,14 +279,17 @@
}

// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
    totalUtilization := map[v1.ResourceName]*resource.Quantity{
        v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
        v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
        v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
    }
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
    totalUtilization := api.ReferencedResourceList{}
    for _, name := range resourceNames {
        if !IsBasicResource(name) {
        switch name {
        case v1.ResourceCPU:
            totalUtilization[name] = resource.NewMilliQuantity(0, resource.DecimalSI)
        case v1.ResourceMemory:
            totalUtilization[name] = resource.NewQuantity(0, resource.BinarySI)
        case v1.ResourcePods:
            totalUtilization[name] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
        default:
            totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
        }
    }
@@ -19,6 +19,7 @@ package descheduler
import (
    "context"
    "fmt"
    "net/url"
    "os"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -62,21 +63,22 @@ func decode(policyConfigFile string, policy []byte, client clientset.Interface,
    if err != nil {
        return nil, err
    }

    setDefaults(*internalPolicy, registry, client)

    return internalPolicy, nil
    return setDefaults(*internalPolicy, registry, client)
}

func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) *api.DeschedulerPolicy {
func setDefaults(in api.DeschedulerPolicy, registry pluginregistry.Registry, client clientset.Interface) (*api.DeschedulerPolicy, error) {
    var err error
    for idx, profile := range in.Profiles {
        // If we need to set defaults coming from loadtime in each profile we do it here
        in.Profiles[idx] = setDefaultEvictor(profile, client)
        in.Profiles[idx], err = setDefaultEvictor(profile, client)
        if err != nil {
            return nil, err
        }
        for _, pluginConfig := range profile.PluginConfigs {
            setDefaultsPluginConfig(&pluginConfig, registry)
        }
    }
    return &in
    return &in, nil
}

func setDefaultsPluginConfig(pluginConfig *api.PluginConfig, registry pluginregistry.Registry) {
@@ -97,7 +99,7 @@ func findPluginName(names []string, key string) bool {
    return false
}

func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) api.DeschedulerProfile {
func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interface) (api.DeschedulerProfile, error) {
    newPluginConfig := api.PluginConfig{
        Name: defaultevictor.PluginName,
        Args: &defaultevictor.DefaultEvictorArgs{
@@ -128,18 +130,19 @@ func setDefaultEvictor(profile api.DeschedulerProfile, client clientset.Interfac
        thresholdPriority, err := utils.GetPriorityValueFromPriorityThreshold(context.TODO(), client, defaultevictorPluginConfig.Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold)
        if err != nil {
            klog.Error(err, "Failed to get threshold priority from args")
            return profile, err
        }
        profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold = &api.PriorityThreshold{}
        profile.PluginConfigs[idx].Args.(*defaultevictor.DefaultEvictorArgs).PriorityThreshold.Value = &thresholdPriority
    return profile
    return profile, nil
}

func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginregistry.Registry) error {
    var errorsInProfiles []error
    var errorsInPolicy []error
    for _, profile := range in.Profiles {
        for _, pluginConfig := range profile.PluginConfigs {
            if _, ok := registry[pluginConfig.Name]; !ok {
                errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
                errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: plugin %s in pluginConfig not registered", profile.Name, pluginConfig.Name))
                continue
            }

@@ -148,9 +151,46 @@ func validateDeschedulerConfiguration(in api.DeschedulerPolicy, registry pluginr
                continue
            }
            if err := pluginUtilities.PluginArgValidator(pluginConfig.Args); err != nil {
                errorsInProfiles = append(errorsInProfiles, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
                errorsInPolicy = append(errorsInPolicy, fmt.Errorf("in profile %s: %s", profile.Name, err.Error()))
            }
        }
    }
    return utilerrors.NewAggregate(errorsInProfiles)
    providers := map[api.MetricsSource]api.MetricsProvider{}
    for _, provider := range in.MetricsProviders {
        if _, ok := providers[provider.Source]; ok {
            errorsInPolicy = append(errorsInPolicy, fmt.Errorf("metric provider %q is already configured, each source can be configured only once", provider.Source))
        } else {
            providers[provider.Source] = provider
        }
    }
    if _, exists := providers[api.KubernetesMetrics]; exists && in.MetricsCollector != nil && in.MetricsCollector.Enabled {
        errorsInPolicy = append(errorsInPolicy, fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"))
    }
    if prometheusConfig, exists := providers[api.PrometheusMetrics]; exists {
        if prometheusConfig.Prometheus == nil {
            errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus configuration is required when prometheus source is enabled"))
        } else {
            if prometheusConfig.Prometheus.URL == "" {
                errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL is required when prometheus is enabled"))
            } else {
                u, err := url.Parse(prometheusConfig.Prometheus.URL)
                if err != nil {
                    errorsInPolicy = append(errorsInPolicy, fmt.Errorf("error parsing prometheus URL: %v", err))
                } else if u.Scheme != "https" {
                    errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus URL's scheme is not https, got %q instead", u.Scheme))
                }
            }

            if prometheusConfig.Prometheus.AuthToken != nil {
                secretRef := prometheusConfig.Prometheus.AuthToken.SecretReference
                if secretRef == nil {
                    errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"))
                } else if secretRef.Name == "" || secretRef.Namespace == "" {
                    errorsInPolicy = append(errorsInPolicy, fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"))
                }
            }
        }
    }

    return utilerrors.NewAggregate(errorsInPolicy)
}
@@ -17,6 +17,7 @@ limitations under the License.
package descheduler

import (
    "errors"
    "fmt"
    "testing"

@@ -121,6 +122,25 @@ profiles:
            },
        },
    },
    {
        description: "v1alpha2 to internal, validate error handling (priorityThreshold exceeding maximum)",
        policy: []byte(`apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
      - name: "DefaultEvictor"
        args:
          priorityThreshold:
            value: 2000000001
    plugins:
      deschedule:
        enabled:
          - "RemovePodsHavingTooManyRestarts"
`),
        result: nil,
        err:    errors.New("priority threshold can't be greater than 2000000000"),
    },
}

for _, tc := range testCases {
@@ -191,12 +211,167 @@ func TestValidateDeschedulerConfiguration(t *testing.T) {
        },
        result: fmt.Errorf("[in profile RemoveFailedPods: only one of Include/Exclude namespaces can be set, in profile RemovePodsViolatingTopologySpreadConstraint: only one of Include/Exclude namespaces can be set]"),
    },
    {
        description: "Duplicate metrics providers error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {Source: api.KubernetesMetrics},
                {Source: api.KubernetesMetrics},
            },
        },
        result: fmt.Errorf("metric provider \"KubernetesMetrics\" is already configured, each source can be configured only once"),
    },
    {
        description: "Too many metrics providers error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsCollector: &api.MetricsCollector{
                Enabled: true,
            },
            MetricsProviders: []api.MetricsProvider{
                {Source: api.KubernetesMetrics},
            },
        },
        result: fmt.Errorf("it is not allowed to combine metrics provider when metrics collector is enabled"),
    },
    {
        description: "missing prometheus url error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source:     api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{},
                },
            },
        },
        result: fmt.Errorf("prometheus URL is required when prometheus is enabled"),
    },
    {
        description: "prometheus url is not valid error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL: "http://example.com:-80",
                    },
                },
            },
        },
        result: fmt.Errorf("error parsing prometheus URL: parse \"http://example.com:-80\": invalid port \":-80\" after host"),
    },
    {
        description: "prometheus url does not have https error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL: "http://example.com:80",
                    },
                },
            },
        },
        result: fmt.Errorf("prometheus URL's scheme is not https, got \"http\" instead"),
    },
    {
        description: "prometheus authtoken with no secret reference error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL:       "https://example.com:80",
                        AuthToken: &api.AuthToken{},
                    },
                },
            },
        },
        result: fmt.Errorf("prometheus authToken secret is expected to be set when authToken field is"),
    },
    {
        description: "prometheus authtoken with empty secret reference error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL: "https://example.com:80",
                        AuthToken: &api.AuthToken{
                            SecretReference: &api.SecretReference{},
                        },
                    },
                },
            },
        },
        result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
    },
    {
        description: "prometheus authtoken missing secret reference namespace error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL: "https://example.com:80",
                        AuthToken: &api.AuthToken{
                            SecretReference: &api.SecretReference{
                                Name: "secretname",
                            },
                        },
                    },
                },
            },
        },
        result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
    },
    {
        description: "prometheus authtoken missing secret reference name error",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL: "https://example.com:80",
                        AuthToken: &api.AuthToken{
                            SecretReference: &api.SecretReference{
                                Namespace: "secretnamespace",
                            },
                        },
                    },
                },
            },
        },
        result: fmt.Errorf("prometheus authToken secret reference does not set both namespace and name"),
    },
    {
        description: "valid prometheus authtoken secret reference",
        deschedulerPolicy: api.DeschedulerPolicy{
            MetricsProviders: []api.MetricsProvider{
                {
                    Source: api.PrometheusMetrics,
                    Prometheus: &api.Prometheus{
                        URL: "https://example.com:80",
                        AuthToken: &api.AuthToken{
                            SecretReference: &api.SecretReference{
                                Name:      "secretname",
                                Namespace: "secretnamespace",
                            },
                        },
                    },
                },
            },
        },
    },
}

for _, tc := range testCases {
    t.Run(tc.description, func(t *testing.T) {
        result := validateDeschedulerConfiguration(tc.deschedulerPolicy, pluginregistry.PluginRegistry)
        if result.Error() != tc.result.Error() {
        if result == nil && tc.result != nil || result != nil && tc.result == nil {
            t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
        } else if result == nil && tc.result == nil {
            return
        } else if result.Error() != tc.result.Error() {
            t.Errorf("test '%s' failed. expected \n'%s', got \n'%s'", tc.description, tc.result, result)
        }
    })
@@ -11,6 +11,8 @@ import (
    "sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
    podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
    frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"

    promapi "github.com/prometheus/client_golang/api"
)

type HandleImpl struct {
@@ -20,6 +22,7 @@ type HandleImpl struct {
    EvictorFilterImpl    frameworktypes.EvictorPlugin
    PodEvictorImpl       *evictions.PodEvictor
    MetricsCollectorImpl *metricscollector.MetricsCollector
    PrometheusClientImpl promapi.Client
}

var _ frameworktypes.Handle = &HandleImpl{}
@@ -28,6 +31,10 @@ func (hi *HandleImpl) ClientSet() clientset.Interface {
    return hi.ClientsetImpl
}

func (hi *HandleImpl) PrometheusClient() promapi.Client {
    return hi.PrometheusClientImpl
}

func (hi *HandleImpl) MetricsCollector() *metricscollector.MetricsCollector {
    return hi.MetricsCollectorImpl
}
@@ -44,6 +44,7 @@ type testCase struct {
    evictFailedBarePods     bool
    evictLocalStoragePods   bool
    evictSystemCriticalPods bool
    ignorePvcPods           bool
    priorityThreshold       *int32
    nodeFit                 bool
    minReplicas             uint
@@ -769,6 +770,38 @@ func TestDefaultEvictorFilter(t *testing.T) {
        },
        ignorePodsWithoutPDB: true,
        result:               true,
    }, {
        description: "ignorePvcPods is set, pod with PVC, not evicts",
        pods: []*v1.Pod{
            test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
                pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
                pod.Spec.Volumes = []v1.Volume{
                    {
                        Name: "pvc", VolumeSource: v1.VolumeSource{
                            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
                        },
                    },
                }
            }),
        },
        ignorePvcPods: true,
        result:        false,
    }, {
        description: "ignorePvcPods is not set, pod with PVC, evicts",
        pods: []*v1.Pod{
            test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
                pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
                pod.Spec.Volumes = []v1.Volume{
                    {
                        Name: "pvc", VolumeSource: v1.VolumeSource{
                            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "foo"},
                        },
                    },
                }
            }),
        },
        ignorePvcPods: false,
        result:        true,
    },
}

@@ -862,7 +895,7 @@ func initializePlugin(ctx context.Context, test testCase) (frameworktypes.Plugin
    defaultEvictorArgs := &DefaultEvictorArgs{
        EvictLocalStoragePods:   test.evictLocalStoragePods,
        EvictSystemCriticalPods: test.evictSystemCriticalPods,
        IgnorePvcPods:           false,
        IgnorePvcPods:           test.ignorePvcPods,
        EvictFailedBarePods:     test.evictFailedBarePods,
        PriorityThreshold: &api.PriorityThreshold{
            Value: test.priorityThreshold,
90	pkg/framework/plugins/example/README.md (new file)
@@ -0,0 +1,90 @@
# Descheduler Plugin: Example Implementation

This directory provides an example plugin for the Kubernetes Descheduler,
demonstrating how to evict pods based on custom criteria. The plugin targets
pods based on:

* **Name Regex:** Pods matching a specified regular expression.
* **Age:** Pods older than a defined duration.
* **Namespace:** Pods within or outside a given list of namespaces (inclusion
  or exclusion).

## Building and Integrating the Plugin

To incorporate this plugin into your Descheduler build, you must register it
within the Descheduler's plugin registry. Follow these steps:

1. **Register the Plugin:**
   * Modify the `pkg/descheduler/setupplugins.go` file.
   * Add the following registration line to the end of the
     `RegisterDefaultPlugins()` function:

   ```go
   pluginregistry.Register(
       example.PluginName,
       example.New,
       &example.Example{},
       &example.ExampleArgs{},
       example.ValidateExampleArgs,
       example.SetDefaults_Example,
       registry,
   )
   ```

2. **Generate Code:**
   * If you modify the plugin's code, execute `make gen` before rebuilding the
     Descheduler. This ensures generated code is up-to-date.

3. **Rebuild the Descheduler:**
   * Build the descheduler with your changes.

## Plugin Configuration

Configure the plugin's behavior using the Descheduler's policy configuration.
Here's an example:

```yaml
apiVersion: descheduler/v1alpha2
kind: DeschedulerPolicy
profiles:
  - name: LifecycleAndUtilization
    plugins:
      deschedule:
        enabled:
          - Example
    pluginConfig:
      - name: Example
        args:
          regex: ^descheduler-test.*$
          maxAge: 3m
          namespaces:
            include:
              - default
```

## Explanation

- `regex: ^descheduler-test.*$`: Evicts pods whose names match the regular
  expression `^descheduler-test.*$`.
- `maxAge: 3m`: Evicts pods older than 3 minutes.
- `namespaces.include: [default]`: Evicts pods within the `default` namespace.

This configuration will cause the plugin to evict pods that meet all three
criteria: matching the `regex`, exceeding the `maxAge`, and residing in the
specified namespace.

## Notes

- This plugin is configured through the `ExampleArgs` struct, which defines the
  plugin's parameters.
- Plugins must implement a function to validate and another to set the default
  values for their `Args` struct.
- The fields in the `ExampleArgs` struct reflect directly into the
  `DeschedulerPolicy` configuration.
- Plugins must comply with the `DeschedulePlugin` interface to be registered
  with the Descheduler.
- The main functionality of the plugin is implemented in the `Deschedule()`
  method, which is called by the Descheduler when the plugin is executed; a
  minimal skeleton is sketched after this list.
- A good amount of descheduling logic can be achieved by means of filters.
- Whenever a change in the plugin's configuration is made, the developer should
  regenerate the code by running `make gen`.
36
pkg/framework/plugins/example/defaults.go
Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}

// SetDefaults_Example sets the default arguments for the Example plugin. In
// this case we set the default regex to match only the empty string (so it
// never matches a real pod name). The default maximum age for pods is set to
// 5 minutes.
func SetDefaults_Example(obj runtime.Object) {
	args := obj.(*ExampleArgs)
	if args.Regex == "" {
		args.Regex = "^$"
	}
	if args.MaxAge == "" {
		args.MaxAge = "5m"
	}
}
16
pkg/framework/plugins/example/docs.go
Normal file
@@ -0,0 +1,16 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:defaulter-gen=TypeMeta

package example
170
pkg/framework/plugins/example/example.go
Normal file
@@ -0,0 +1,170 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package example

import (
	"context"
	"fmt"
	"regexp"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog/v2"

	fwtypes "sigs.k8s.io/descheduler/pkg/framework/types"

	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// PluginName is used when registering the plugin. You need to choose a unique
// name across all plugins. This name is used to identify the plugin config in
// the descheduler policy.
const PluginName = "Example"

// We need to ensure that the plugin struct complies with the DeschedulePlugin
// interface. This prevents unexpected changes that may render this type
// incompatible.
var _ fwtypes.DeschedulePlugin = &Example{}

// Example is our plugin (implementing the DeschedulePlugin interface). This
// plugin will evict pods that match a regex and are older than a certain age.
type Example struct {
	handle    fwtypes.Handle
	args      *ExampleArgs
	podFilter podutil.FilterFunc
}

// New builds a plugin instance from its arguments. Arguments are passed in as
// a runtime.Object. Handle is used by plugins to retrieve a kubernetes client
// set, evictor interface, shared informer factory and other instruments shared
// across different plugins.
func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
	// make sure we are receiving the right argument type.
	exampleArgs, ok := args.(*ExampleArgs)
	if !ok {
		return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
	}

	// we can use the included and excluded namespaces to filter the pods we want
	// to evict.
	var includedNamespaces, excludedNamespaces sets.Set[string]
	if exampleArgs.Namespaces != nil {
		includedNamespaces = sets.New(exampleArgs.Namespaces.Include...)
		excludedNamespaces = sets.New(exampleArgs.Namespaces.Exclude...)
	}

	// here we create a pod filter that will return only pods that can be
	// evicted (according to the evictor and inside the namespaces we want).
	// NOTE: here we could also add a function to filter out by the regex and
	// age, but for the sake of the example we are keeping it simple and
	// filtering those out in the Deschedule() function.
	podFilter, err := podutil.NewOptions().
		WithNamespaces(includedNamespaces).
		WithoutNamespaces(excludedNamespaces).
		WithFilter(
			podutil.WrapFilterFuncs(
				handle.Evictor().Filter,
				handle.Evictor().PreEvictionFilter,
			),
		).
		BuildFilterFunc()
	if err != nil {
		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
	}

	return &Example{
		handle:    handle,
		podFilter: podFilter,
		args:      exampleArgs,
	}, nil
}

// Name returns the plugin name.
func (d *Example) Name() string {
	return PluginName
}

// Deschedule is the function where most of the logic around eviction is laid
// down. Here we go through all pods in all nodes and evict the ones that match
// the regex and are older than the maximum age. This function receives a list
// of nodes we need to process.
func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
	var podsToEvict []*v1.Pod
	logger := klog.FromContext(ctx)
	logger.Info("Example plugin starting descheduling")

	re, err := regexp.Compile(d.args.Regex)
	if err != nil {
		err = fmt.Errorf("fail to compile regex: %w", err)
		return &fwtypes.Status{Err: err}
	}

	duration, err := time.ParseDuration(d.args.MaxAge)
	if err != nil {
		err = fmt.Errorf("fail to parse max age: %w", err)
		return &fwtypes.Status{Err: err}
	}

	// here we create an auxiliary filter to remove all pods that don't
	// match the provided regex or are still too young to be evicted.
	// This filter will be used when we list all pods on a node. This
	// filter here could have been part of the podFilter but we are
	// keeping it separate for the sake of the example.
	filter := func(pod *v1.Pod) bool {
		if !re.MatchString(pod.Name) {
			return false
		}
		deadline := pod.CreationTimestamp.Add(duration)
		return time.Now().After(deadline)
	}

	// go node by node getting all pods that we can evict.
	for _, node := range nodes {
		// ListAllPodsOnANode is a helper function that retrieves all
		// pods filtering out the ones we can't evict. We merge the
		// default filters with the one we created above.
		pods, err := podutil.ListAllPodsOnANode(
			node.Name,
			d.handle.GetPodsAssignedToNodeFunc(),
			podutil.WrapFilterFuncs(d.podFilter, filter),
		)
		if err != nil {
			err = fmt.Errorf("fail to list pods: %w", err)
			return &fwtypes.Status{Err: err}
		}

		// as we have already filtered out pods that don't match the
		// regex or are too young, we can simply add them all to the
		// eviction list.
		podsToEvict = append(podsToEvict, pods...)
	}

	// evict all the pods.
	for _, pod := range podsToEvict {
		logger.Info("Example plugin evicting pod", "pod", klog.KObj(pod))
		opts := evictions.EvictOptions{StrategyName: PluginName}
		if err := d.handle.Evictor().Evict(ctx, pod, opts); err != nil {
			logger.Error(err, "unable to evict pod", "pod", klog.KObj(pod))
		}
	}

	logger.Info("Example plugin finished descheduling")
	return nil
}
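For orientation: both `WrapFilterFuncs` calls above compose filters with AND semantics, so a pod is kept only when every wrapped filter accepts it. A minimal sketch under that assumption (hypothetical filters, reusing the imports of the file above plus the standard `strings` package):

```go
// Compose two hypothetical gates the same way the plugin merges the
// evictor filter with its regex/age filter: both must accept the pod.
namespaceGate := func(pod *v1.Pod) bool { return pod.Namespace == "default" }
nameGate := func(pod *v1.Pod) bool { return strings.HasPrefix(pod.Name, "descheduler-test") }

combined := podutil.WrapFilterFuncs(namespaceGate, nameGate)
// combined(pod) returns true only when both gates return true.
```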
31
pkg/framework/plugins/example/register.go
Normal file
@@ -0,0 +1,31 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
)

var (
	SchemeBuilder      = runtime.NewSchemeBuilder()
	localSchemeBuilder = &SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addDefaultingFuncs)
}
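The exported `AddToScheme` is how a consumer wires this package's manually written scheme functions into a runtime scheme; a hedged sketch of a hypothetical caller:

```go
// Build a scheme carrying this package's scheme functions. AddToScheme
// returns an error if any registered function fails to install.
scheme := runtime.NewScheme()
if err := AddToScheme(scheme); err != nil {
	panic(err) // hypothetical caller; handle the error appropriately
}
```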
45
pkg/framework/plugins/example/types.go
Normal file
@@ -0,0 +1,45 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/descheduler/pkg/api"
)

// +k8s:deepcopy-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ExampleArgs holds a list of arguments used to configure the plugin. For this
// simple example we only care about a regex, a maximum age and possibly a list
// of namespaces to which we want to apply the descheduler. This plugin evicts
// pods that match a given regular expression and are older than the maximum
// allowed age. Most of the fields here are defined as strings so we can
// validate them elsewhere (demonstrating a more complete implementation
// pattern).
type ExampleArgs struct {
	metav1.TypeMeta `json:",inline"`

	// Regex is a regular expression we use to match against pod names. If
	// the pod name matches the regex it will be evicted. This is expected
	// to be a valid regular expression (according to go's regexp package).
	Regex string `json:"regex"`

	// MaxAge is the maximum age a pod can have before it is considered for
	// eviction. This is expected to be a valid time.Duration.
	MaxAge string `json:"maxAge"`

	// Namespaces allows us to filter on which namespaces we want to apply
	// the descheduler.
	Namespaces *api.Namespaces `json:"namespaces,omitempty"`
}
45
pkg/framework/plugins/example/validation.go
Normal file
@@ -0,0 +1,45 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package example

import (
	"fmt"
	"regexp"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
)

// ValidateExampleArgs validates if the plugin arguments are correct (we have
// everything we need). In this case we only validate that we have a valid
// regular expression and maximum age.
func ValidateExampleArgs(obj runtime.Object) error {
	args := obj.(*ExampleArgs)
	if args.Regex == "" {
		return fmt.Errorf("regex argument must be set")
	}

	if _, err := regexp.Compile(args.Regex); err != nil {
		return fmt.Errorf("invalid regex: %v", err)
	}

	if _, err := time.ParseDuration(args.MaxAge); err != nil {
		return fmt.Errorf("invalid max age: %v", err)
	}

	return nil
}
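Taken together with `SetDefaults_Example` from defaults.go, the intended lifecycle of the arguments is defaulting first, then validation. A small sketch (hypothetical driver code, not part of the diff):

```go
// Start with only MaxAge set; Regex is left empty on purpose.
args := &ExampleArgs{MaxAge: "10m"}
SetDefaults_Example(args) // fills Regex with "^$" and keeps MaxAge as-is
if err := ValidateExampleArgs(args); err != nil {
	// a malformed regex or duration string would surface here
	panic(err)
}
```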
57
pkg/framework/plugins/example/zz_generated.deepcopy.go
generated
Normal file
@@ -0,0 +1,57 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package example

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	api "sigs.k8s.io/descheduler/pkg/api"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExampleArgs) DeepCopyInto(out *ExampleArgs) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Namespaces != nil {
		in, out := &in.Namespaces, &out.Namespaces
		*out = new(api.Namespaces)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleArgs.
func (in *ExampleArgs) DeepCopy() *ExampleArgs {
	if in == nil {
		return nil
	}
	out := new(ExampleArgs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ExampleArgs) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
33
pkg/framework/plugins/example/zz_generated.defaults.go
generated
Normal file
@@ -0,0 +1,33 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by defaulter-gen. DO NOT EDIT.

package example

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	return nil
}
@@ -0,0 +1,84 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package classifier

// Classifier is a function that classifies a resource usage based on a limit.
// The function should return true if the resource usage matches the classifier
// intent.
type Classifier[K comparable, V any] func(K, V, V) bool

// Comparer is a function that compares two objects. This function should return
// -1 if the first object is less than the second, 0 if they are equal, and 1 if
// the first object is greater than the second. This is a simplification: any
// negative, zero, or positive value is acceptable.
type Comparer[V any] func(V, V) int

// Values is a map of values indexed by a comparable key. An example of this
// can be a list of resources indexed by a node name.
type Values[K comparable, V any] map[K]V

// Limits is a map of lists of limits indexed by a comparable key. Each limit
// inside the list requires a classifier to evaluate.
type Limits[K comparable, V any] map[K][]V

// Classify is a function that classifies based on classifier functions. This
// function receives Values, a list of n Limits (indexed by name), and a list
// of n Classifiers. The classifier at position n is called to evaluate the
// limit at position n. The first classifier to return true receives the
// value; at that point the loop breaks and the next value is evaluated. This
// function returns a slice of maps; each position in the returned slice
// corresponds to one of the classifiers (e.g. if n limits and classifiers are
// provided, the returned slice will have n maps).
func Classify[K comparable, V any](
	values Values[K, V], limits Limits[K, V], classifiers ...Classifier[K, V],
) []map[K]V {
	result := make([]map[K]V, len(classifiers))
	for i := range classifiers {
		result[i] = make(map[K]V)
	}

	for index, usage := range values {
		for i, limit := range limits[index] {
			if len(classifiers) <= i {
				continue
			}
			if !classifiers[i](index, usage, limit) {
				continue
			}
			result[i][index] = usage
			break
		}
	}

	return result
}

// ForMap is a function that returns a classifier that compares all values in a
// map. The function receives a Comparer function that is used to compare all
// the map values. The returned Classifier will return true only if the
// provided Comparer function returns a value less than 0 for all the values.
func ForMap[K, I comparable, V any, M ~map[I]V](cmp Comparer[V]) Classifier[K, M] {
	return func(_ K, usages, limits M) bool {
		for idx, usage := range usages {
			if limit, ok := limits[idx]; ok {
				if cmp(usage, limit) >= 0 {
					return false
				}
			}
		}
		return true
	}
}
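A minimal, self-contained sketch of driving `Classify` (it mirrors the unit tests that follow; the import path matches the one used by the HighNodeUtilization plugin further down):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
)

func main() {
	// One usage value per node and two limits per node: a low and a
	// high watermark.
	usage := classifier.Values[string, int]{"node1": 2, "node2": 8}
	limits := classifier.Limits[string, int]{
		"node1": {4, 6},
		"node2": {4, 6},
	}

	groups := classifier.Classify(usage, limits,
		// first classifier: usage strictly below the low watermark.
		func(_ string, usage, limit int) bool { return usage < limit },
		// second classifier: usage strictly above the high watermark.
		func(_ string, usage, limit int) bool { return usage > limit },
	)

	fmt.Println(groups[0]) // map[node1:2] -> underutilized
	fmt.Println(groups[1]) // map[node2:8] -> overutilized
}
```

`ForMap` builds the same kind of classifier over map-valued usages (such as a `v1.ResourceList`), returning true only when the comparer reports "less than" for every resource present in both maps.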
@@ -0,0 +1,739 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package classifier

import (
	"reflect"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func TestClassifySimple(t *testing.T) {
	for _, tt := range []struct {
		name        string
		usage       map[string]int
		limits      map[string][]int
		classifiers []Classifier[string, int]
		expected    []map[string]int
	}{
		{
			name:     "empty",
			usage:    map[string]int{},
			limits:   map[string][]int{},
			expected: []map[string]int{},
		},
		{
			name: "one under one over",
			usage: map[string]int{
				"node1": 2,
				"node2": 8,
			},
			limits: map[string][]int{
				"node1": {4, 6},
				"node2": {4, 6},
			},
			expected: []map[string]int{
				{"node1": 2},
				{"node2": 8},
			},
			classifiers: []Classifier[string, int]{
				func(_ string, usage, limit int) bool {
					return usage < limit
				},
				func(_ string, usage, limit int) bool {
					return usage > limit
				},
			},
		},
		{
			name: "randomly positioned over utilized",
			usage: map[string]int{
				"node1": 2,
				"node2": 8,
				"node3": 2,
				"node4": 8,
				"node5": 8,
				"node6": 2,
				"node7": 2,
				"node8": 8,
				"node9": 8,
			},
			limits: map[string][]int{
				"node1": {4, 6},
				"node2": {4, 6},
				"node3": {4, 6},
				"node4": {4, 6},
				"node5": {4, 6},
				"node6": {4, 6},
				"node7": {4, 6},
				"node8": {4, 6},
				"node9": {4, 6},
			},
			expected: []map[string]int{
				{
					"node1": 2,
					"node3": 2,
					"node6": 2,
					"node7": 2,
				},
				{
					"node2": 8,
					"node4": 8,
					"node5": 8,
					"node8": 8,
					"node9": 8,
				},
			},
			classifiers: []Classifier[string, int]{
				func(_ string, usage, limit int) bool {
					return usage < limit
				},
				func(_ string, usage, limit int) bool {
					return usage > limit
				},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			result := Classify(tt.usage, tt.limits, tt.classifiers...)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected result: %v", result)
			}
		})
	}
}

func TestClassify_pointers(t *testing.T) {
	for _, tt := range []struct {
		name        string
		usage       map[string]map[v1.ResourceName]*resource.Quantity
		limits      map[string][]map[v1.ResourceName]*resource.Quantity
		classifiers []Classifier[string, map[v1.ResourceName]*resource.Quantity]
		expected    []map[string]map[v1.ResourceName]*resource.Quantity
	}{
		{
			name:     "empty",
			usage:    map[string]map[v1.ResourceName]*resource.Quantity{},
			limits:   map[string][]map[v1.ResourceName]*resource.Quantity{},
			expected: []map[string]map[v1.ResourceName]*resource.Quantity{},
		},
		{
			name: "single underutilized",
			usage: map[string]map[v1.ResourceName]*resource.Quantity{
				"node1": {
					v1.ResourceCPU:    ptr.To(resource.MustParse("2")),
					v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
				},
			},
			limits: map[string][]map[v1.ResourceName]*resource.Quantity{
				"node1": {
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("4")),
						v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
					},
				},
			},
			expected: []map[string]map[v1.ResourceName]*resource.Quantity{
				{
					"node1": {
						v1.ResourceCPU:    ptr.To(resource.MustParse("2")),
						v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
					},
				},
			},
			classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
				ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
					func(usage, limit *resource.Quantity) int {
						return usage.Cmp(*limit)
					},
				),
			},
		},
		{
			name: "single underutilized and properly utilized",
			usage: map[string]map[v1.ResourceName]*resource.Quantity{
				"node1": {
					v1.ResourceCPU:    ptr.To(resource.MustParse("2")),
					v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
				},
				"node2": {
					v1.ResourceCPU:    ptr.To(resource.MustParse("5")),
					v1.ResourceMemory: ptr.To(resource.MustParse("5Gi")),
				},
				"node3": {
					v1.ResourceCPU:    ptr.To(resource.MustParse("8")),
					v1.ResourceMemory: ptr.To(resource.MustParse("8Gi")),
				},
			},
			limits: map[string][]map[v1.ResourceName]*resource.Quantity{
				"node1": {
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("4")),
						v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
					},
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("16")),
						v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
					},
				},
				"node2": {
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("4")),
						v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
					},
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("16")),
						v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
					},
				},
				"node3": {
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("4")),
						v1.ResourceMemory: ptr.To(resource.MustParse("4Gi")),
					},
					{
						v1.ResourceCPU:    ptr.To(resource.MustParse("16")),
						v1.ResourceMemory: ptr.To(resource.MustParse("16Gi")),
					},
				},
			},
			expected: []map[string]map[v1.ResourceName]*resource.Quantity{
				{
					"node1": {
						v1.ResourceCPU:    ptr.To(resource.MustParse("2")),
						v1.ResourceMemory: ptr.To(resource.MustParse("2Gi")),
					},
				},
				{},
			},
			classifiers: []Classifier[string, map[v1.ResourceName]*resource.Quantity]{
				ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
					func(usage, limit *resource.Quantity) int {
						return usage.Cmp(*limit)
					},
				),
				ForMap[string, v1.ResourceName, *resource.Quantity, map[v1.ResourceName]*resource.Quantity](
					func(usage, limit *resource.Quantity) int {
						return limit.Cmp(*usage)
					},
				),
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			result := Classify(tt.usage, tt.limits, tt.classifiers...)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected result: %v", result)
			}
		})
	}
}

func TestClassify(t *testing.T) {
	for _, tt := range []struct {
		name        string
		usage       map[string]v1.ResourceList
		limits      map[string][]v1.ResourceList
		classifiers []Classifier[string, v1.ResourceList]
		expected    []map[string]v1.ResourceList
	}{
		{
			name:     "empty",
			usage:    map[string]v1.ResourceList{},
			limits:   map[string][]v1.ResourceList{},
			expected: []map[string]v1.ResourceList{},
		},
		{
			name: "single underutilized",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{
					"node1": {
						v1.ResourceCPU:    resource.MustParse("2"),
						v1.ResourceMemory: resource.MustParse("2Gi"),
					},
				},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
			},
		},
		{
			name: "less classifiers than limits",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("5"),
					v1.ResourceMemory: resource.MustParse("5Gi"),
				},
				"node3": {
					v1.ResourceCPU:    resource.MustParse("8"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("16"),
						v1.ResourceMemory: resource.MustParse("16Gi"),
					},
				},
				"node2": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("16"),
						v1.ResourceMemory: resource.MustParse("16Gi"),
					},
				},
				"node3": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("16"),
						v1.ResourceMemory: resource.MustParse("16Gi"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{
					"node1": {
						v1.ResourceCPU:    resource.MustParse("2"),
						v1.ResourceMemory: resource.MustParse("2Gi"),
					},
				},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
			},
		},
		{
			name: "more classifiers than limits",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("20"),
					v1.ResourceMemory: resource.MustParse("20"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("50"),
					v1.ResourceMemory: resource.MustParse("50"),
				},
				"node3": {
					v1.ResourceCPU:    resource.MustParse("80"),
					v1.ResourceMemory: resource.MustParse("80"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("30"),
						v1.ResourceMemory: resource.MustParse("30"),
					},
				},
				"node2": {
					{
						v1.ResourceCPU:    resource.MustParse("30"),
						v1.ResourceMemory: resource.MustParse("30"),
					},
				},
				"node3": {
					{
						v1.ResourceCPU:    resource.MustParse("30"),
						v1.ResourceMemory: resource.MustParse("30"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{
					"node1": {
						v1.ResourceCPU:    resource.MustParse("20"),
						v1.ResourceMemory: resource.MustParse("20"),
					},
				},
				{},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return limit.Cmp(usage)
					},
				),
			},
		},
		{
			name: "single underutilized and properly utilized",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("5"),
					v1.ResourceMemory: resource.MustParse("5Gi"),
				},
				"node3": {
					v1.ResourceCPU:    resource.MustParse("8"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("16"),
						v1.ResourceMemory: resource.MustParse("16Gi"),
					},
				},
				"node2": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("16"),
						v1.ResourceMemory: resource.MustParse("16Gi"),
					},
				},
				"node3": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("16"),
						v1.ResourceMemory: resource.MustParse("16Gi"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{
					"node1": {
						v1.ResourceCPU:    resource.MustParse("2"),
						v1.ResourceMemory: resource.MustParse("2Gi"),
					},
				},
				{},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return limit.Cmp(usage)
					},
				),
			},
		},
		{
			name: "single underutilized and multiple over utilized nodes",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("2"),
					v1.ResourceMemory: resource.MustParse("2Gi"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("8"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
				"node3": {
					v1.ResourceCPU:    resource.MustParse("8"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("6"),
						v1.ResourceMemory: resource.MustParse("6Gi"),
					},
				},
				"node2": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("6"),
						v1.ResourceMemory: resource.MustParse("6Gi"),
					},
				},
				"node3": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("6"),
						v1.ResourceMemory: resource.MustParse("6Gi"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{
					"node1": {
						v1.ResourceCPU:    resource.MustParse("2"),
						v1.ResourceMemory: resource.MustParse("2Gi"),
					},
				},
				{
					"node2": {
						v1.ResourceCPU:    resource.MustParse("8"),
						v1.ResourceMemory: resource.MustParse("8Gi"),
					},
					"node3": {
						v1.ResourceCPU:    resource.MustParse("8"),
						v1.ResourceMemory: resource.MustParse("8Gi"),
					},
				},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return limit.Cmp(usage)
					},
				),
			},
		},
		{
			name: "over and under at the same time",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("1"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
				"node2": {
					v1.ResourceCPU:    resource.MustParse("1"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("6"),
						v1.ResourceMemory: resource.MustParse("6Gi"),
					},
				},
				"node2": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("6"),
						v1.ResourceMemory: resource.MustParse("6Gi"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{},
				{},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return limit.Cmp(usage)
					},
				),
			},
		},
		{
			name: "only memory over utilized",
			usage: map[string]v1.ResourceList{
				"node1": {
					v1.ResourceCPU:    resource.MustParse("5"),
					v1.ResourceMemory: resource.MustParse("8Gi"),
				},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{
						v1.ResourceCPU:    resource.MustParse("4"),
						v1.ResourceMemory: resource.MustParse("4Gi"),
					},
					{
						v1.ResourceCPU:    resource.MustParse("6"),
						v1.ResourceMemory: resource.MustParse("6Gi"),
					},
				},
			},
			expected: []map[string]v1.ResourceList{
				{},
				{},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return limit.Cmp(usage)
					},
				),
			},
		},
		{
			name: "randomly positioned over utilized",
			usage: map[string]v1.ResourceList{
				"node1": {v1.ResourceCPU: resource.MustParse("8")},
				"node2": {v1.ResourceCPU: resource.MustParse("2")},
				"node3": {v1.ResourceCPU: resource.MustParse("8")},
				"node4": {v1.ResourceCPU: resource.MustParse("2")},
				"node5": {v1.ResourceCPU: resource.MustParse("8")},
				"node6": {v1.ResourceCPU: resource.MustParse("8")},
				"node7": {v1.ResourceCPU: resource.MustParse("8")},
				"node8": {v1.ResourceCPU: resource.MustParse("2")},
				"node9": {v1.ResourceCPU: resource.MustParse("5")},
			},
			limits: map[string][]v1.ResourceList{
				"node1": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node2": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node3": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node4": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node5": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node6": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node7": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node8": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
				"node9": {
					{v1.ResourceCPU: resource.MustParse("4")},
					{v1.ResourceCPU: resource.MustParse("6")},
				},
			},
			expected: []map[string]v1.ResourceList{
				{
					"node2": {v1.ResourceCPU: resource.MustParse("2")},
					"node4": {v1.ResourceCPU: resource.MustParse("2")},
					"node8": {v1.ResourceCPU: resource.MustParse("2")},
				},
				{
					"node1": {v1.ResourceCPU: resource.MustParse("8")},
					"node3": {v1.ResourceCPU: resource.MustParse("8")},
					"node5": {v1.ResourceCPU: resource.MustParse("8")},
					"node6": {v1.ResourceCPU: resource.MustParse("8")},
					"node7": {v1.ResourceCPU: resource.MustParse("8")},
				},
			},
			classifiers: []Classifier[string, v1.ResourceList]{
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return usage.Cmp(limit)
					},
				),
				ForMap[string, v1.ResourceName, resource.Quantity, v1.ResourceList](
					func(usage, limit resource.Quantity) int {
						return limit.Cmp(usage)
					},
				),
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			result := Classify(tt.usage, tt.limits, tt.classifiers...)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Fatalf("unexpected result: %v", result)
			}
		})
	}
}
@@ -19,9 +19,9 @@ package nodeutilization
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
@@ -29,164 +29,242 @@ import (
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
)
|
||||
|
||||
const HighNodeUtilizationPluginName = "HighNodeUtilization"
|
||||
|
||||
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler can schedule according to its plugin.
|
||||
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
|
||||
|
||||
type HighNodeUtilization struct {
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
underutilizationCriteria []interface{}
|
||||
resourceNames []v1.ResourceName
|
||||
targetThresholds api.ResourceThresholds
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// this lines makes sure that HighNodeUtilization implements the BalancePlugin
|
||||
// interface.
|
||||
var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle
|
||||
func NewHighNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
|
||||
highNodeUtilizatioArgs, ok := args.(*HighNodeUtilizationArgs)
|
||||
// HighNodeUtilization evicts pods from under utilized nodes so that scheduler
|
||||
// can schedule according to its plugin. Note that CPU/Memory requests are used
|
||||
// to calculate nodes' utilization and not the actual resource usage.
|
||||
type HighNodeUtilization struct {
|
||||
handle frameworktypes.Handle
|
||||
args *HighNodeUtilizationArgs
|
||||
podFilter func(pod *v1.Pod) bool
|
||||
criteria []any
|
||||
resourceNames []v1.ResourceName
|
||||
highThresholds api.ResourceThresholds
|
||||
usageClient usageClient
|
||||
}
|
||||
|
||||
// NewHighNodeUtilization builds plugin from its arguments while passing a handle.
|
||||
func NewHighNodeUtilization(
|
||||
genericArgs runtime.Object, handle frameworktypes.Handle,
|
||||
) (frameworktypes.Plugin, error) {
|
||||
args, ok := genericArgs.(*HighNodeUtilizationArgs)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("want args to be of type HighNodeUtilizationArgs, got %T", args)
|
||||
return nil, fmt.Errorf(
|
||||
"want args to be of type HighNodeUtilizationArgs, got %T",
|
||||
genericArgs,
|
||||
)
|
||||
}
|
||||
|
||||
targetThresholds := make(api.ResourceThresholds)
|
||||
setDefaultForThresholds(highNodeUtilizatioArgs.Thresholds, targetThresholds)
|
||||
resourceNames := getResourceNames(targetThresholds)
|
||||
|
||||
underutilizationCriteria := []interface{}{
|
||||
"CPU", highNodeUtilizatioArgs.Thresholds[v1.ResourceCPU],
|
||||
"Mem", highNodeUtilizatioArgs.Thresholds[v1.ResourceMemory],
|
||||
"Pods", highNodeUtilizatioArgs.Thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range highNodeUtilizatioArgs.Thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(highNodeUtilizatioArgs.Thresholds[name]))
|
||||
}
|
||||
// this plugins worries only about thresholds but the nodeplugins
|
||||
// package was made to take two thresholds into account, one for low
|
||||
// and another for high usage. here we make sure we set the high
|
||||
// threshold to the maximum value for all resources for which we have a
|
||||
// threshold.
|
||||
highThresholds := make(api.ResourceThresholds)
|
||||
for rname := range args.Thresholds {
|
||||
highThresholds[rname] = MaxResourcePercentage
|
||||
}
|
||||
|
||||
podFilter, err := podutil.NewOptions().
|
||||
WithFilter(handle.Evictor().Filter).
|
||||
// get the resource names for which we have a threshold. this is
|
||||
// later used when determining if we are going to evict a pod.
|
||||
resourceThresholds := getResourceNames(args.Thresholds)
|
||||
|
||||
// by default we evict pods from the under utilized nodes even if they
|
||||
// don't define a request for a given threshold. this works most of the
|
||||
// times and there is an use case for it. When using the restrict mode
|
||||
// we evaluate if the pod has a request for any of the resources the
|
||||
// user has provided as threshold.
|
||||
filters := []podutil.FilterFunc{handle.Evictor().Filter}
|
||||
if slices.Contains(args.EvictionModes, EvictionModeOnlyThresholdingResources) {
|
||||
filters = append(
|
||||
filters,
|
||||
withResourceRequestForAny(resourceThresholds...),
|
||||
)
|
||||
}
|
||||
|
||||
podFilter, err := podutil.
|
||||
NewOptions().
|
||||
WithFilter(podutil.WrapFilterFuncs(filters...)).
|
||||
BuildFilterFunc()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
|
||||
}
|
||||
|
||||
// resourceNames is a list of all resource names this plugin cares
|
||||
// about. we care about the resources for which we have a threshold and
|
||||
// all we consider the basic resources (cpu, memory, pods).
|
||||
resourceNames := uniquifyResourceNames(
|
||||
append(
|
||||
resourceThresholds,
|
||||
v1.ResourceCPU,
|
||||
v1.ResourceMemory,
|
||||
v1.ResourcePods,
|
||||
),
|
||||
)
|
||||
|
||||
return &HighNodeUtilization{
|
||||
handle: handle,
|
||||
args: highNodeUtilizatioArgs,
|
||||
resourceNames: resourceNames,
|
||||
targetThresholds: targetThresholds,
|
||||
underutilizationCriteria: underutilizationCriteria,
|
||||
podFilter: podFilter,
|
||||
usageClient: newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc()),
|
||||
handle: handle,
|
||||
args: args,
|
||||
resourceNames: resourceNames,
|
||||
highThresholds: highThresholds,
|
||||
criteria: thresholdsToKeysAndValues(args.Thresholds),
|
||||
podFilter: podFilter,
|
||||
usageClient: newRequestedUsageClient(
|
||||
resourceNames,
|
||||
handle.GetPodsAssignedToNodeFunc(),
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name retrieves the plugin name
|
||||
// Name retrieves the plugin name.
|
||||
func (h *HighNodeUtilization) Name() string {
|
||||
return HighNodeUtilizationPluginName
|
||||
}
|
||||
|
||||
// Balance extension point implementation for the plugin
|
||||
// Balance holds the main logic of the plugin. It evicts pods from under
|
||||
// utilized nodes. The goal here is to concentrate pods in fewer nodes so that
|
||||
// less nodes are used.
|
||||
func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
|
||||
if err := h.usageClient.sync(nodes); err != nil {
|
||||
if err := h.usageClient.sync(ctx, nodes); err != nil {
|
||||
return &frameworktypes.Status{
|
||||
Err: fmt.Errorf("error getting node usage: %v", err),
|
||||
}
|
||||
}
|
||||
|
||||
sourceNodes, highNodes := classifyNodes(
|
||||
getNodeUsage(nodes, h.usageClient),
|
||||
getNodeThresholds(nodes, h.args.Thresholds, h.targetThresholds, h.resourceNames, false, h.usageClient),
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
// take a picture of the current state of the nodes, everything else
|
||||
// here is based on this snapshot.
|
||||
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, h.usageClient)
|
||||
capacities := referencedResourceListForNodesCapacity(nodes)
|
||||
|
||||
// node usages are not presented as percentages over the capacity.
|
||||
// we need to normalize them to be able to compare them with the
|
||||
// thresholds. thresholds are already provided by the user in
|
||||
// percentage.
|
||||
usage, thresholds := assessNodesUsagesAndStaticThresholds(
|
||||
nodesUsageMap, capacities, h.args.Thresholds, h.highThresholds,
|
||||
)
|
||||
|
||||
// classify nodes in two groups: underutilized and schedulable. we will
|
||||
// later try to move pods from the first group to the second.
|
||||
nodeGroups := classifier.Classify(
|
||||
usage, thresholds,
|
||||
// underutilized nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
return isNodeBelowThreshold(usage, threshold)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
|
||||
// schedulable nodes.
|
||||
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
|
||||
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
|
||||
klog.V(2).InfoS(
|
||||
"Node is unschedulable",
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
)
|
||||
return false
|
||||
}
|
||||
return !isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
})
|
||||
return true
|
||||
},
|
||||
)
|
||||
|
||||
// log message in one line
|
||||
klog.V(1).InfoS("Criteria for a node below target utilization", h.underutilizationCriteria...)
|
||||
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(sourceNodes))
|
||||
// the nodeplugin package works by means of NodeInfo structures. these
|
||||
// structures hold a series of information about the nodes. now that
|
||||
// we have classified the nodes, we can build the NodeInfo structures
|
||||
// for each group. NodeInfo structs carry usage and available resources
|
||||
// for each node.
|
||||
nodeInfos := make([][]NodeInfo, 2)
|
||||
category := []string{"underutilized", "overutilized"}
|
||||
for i := range nodeGroups {
|
||||
for nodeName := range nodeGroups[i] {
|
||||
klog.InfoS(
|
||||
"Node has been classified",
|
||||
"category", category[i],
|
||||
"node", klog.KObj(nodesMap[nodeName]),
|
||||
"usage", nodesUsageMap[nodeName],
|
||||
"usagePercentage", normalizer.Round(usage[nodeName]),
|
||||
)
|
||||
nodeInfos[i] = append(nodeInfos[i], NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: nodesMap[nodeName],
|
||||
usage: nodesUsageMap[nodeName],
|
||||
allPods: podListMap[nodeName],
|
||||
},
|
||||
available: capNodeCapacitiesToThreshold(
|
||||
nodesMap[nodeName],
|
||||
thresholds[nodeName][1],
|
||||
h.resourceNames,
|
||||
),
|
||||
})
|
||||
}
|
||||
}

if len(sourceNodes) == 0 {
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]

klog.V(1).InfoS("Criteria for a node below target utilization", h.criteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))

if len(lowNodes) == 0 {
klog.V(1).InfoS(
"No node is underutilized, nothing to do here, you might tune your thresholds further",
)
return nil
}
if len(sourceNodes) <= h.args.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", h.args.NumberOfNodes)

if len(lowNodes) <= h.args.NumberOfNodes {
klog.V(1).InfoS(
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
"underutilizedNodes", len(lowNodes),
"numberOfNodes", h.args.NumberOfNodes,
)
return nil
}
if len(sourceNodes) == len(nodes) {

if len(lowNodes) == len(nodes) {
klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
return nil
}
if len(highNodes) == 0 {

if len(schedulableNodes) == 0 {
klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
return nil
}

// stop if the total available usage has dropped to zero - no more pods can be scheduled
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
for name := range totalAvailableUsage {
if totalAvailableUsage[name].CmpInt64(0) < 1 {
// stops the eviction process if the total available capacity usage has
// dropped to zero - no more pods can be scheduled. this signals a stop
// if any of the available resources has dropped to zero.
continueEvictionCond := func(_ NodeInfo, avail api.ReferencedResourceList) bool {
for name := range avail {
if avail[name].CmpInt64(0) < 1 {
return false
}
}

return true
}
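Note the semantics: eviction halts as soon as any single tracked resource is exhausted, even if the others still have headroom. A small illustrative sketch of that behavior (values made up; api.ReferencedResourceList is assumed here to be a map of resource names to *resource.Quantity, as its usage above implies):

func exampleStopCondition() {
	avail := api.ReferencedResourceList{
		v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI), // exhausted
		v1.ResourceMemory: resource.NewQuantity(1<<30, resource.BinarySI),   // 1Gi left
	}
	stop := false
	for name := range avail {
		if avail[name].CmpInt64(0) < 1 {
			stop = true // CPU hit zero, so eviction halts
		}
	}
	fmt.Println(stop) // true
}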

// Sort the nodes by the usage in ascending order
sortNodesByUsage(sourceNodes, true)
// sorts the nodes by the usage in ascending order.
sortNodesByUsage(lowNodes, true)

evictPodsFromSourceNodes(
ctx,
h.args.EvictableNamespaces,
sourceNodes,
highNodes,
lowNodes,
schedulableNodes,
h.handle.Evictor(),
evictions.EvictOptions{StrategyName: HighNodeUtilizationPluginName},
h.podFilter,
h.resourceNames,
continueEvictionCond,
h.usageClient,
nil,
)

return nil
}

func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds) {
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
thresholds[v1.ResourcePods] = MaxResourcePercentage
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
}

// Default targetThreshold resource values to 100
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage

for name := range thresholds {
if !nodeutil.IsBasicResource(name) {
targetThresholds[name] = MaxResourcePercentage
}
}
}
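The net effect: any basic resource the user leaves out is treated as 100%, and targetThresholds is forced to 100% across the board, so only the user-specified thresholds can actually trip. A quick illustration using the function above:

func exampleDefaults() {
	// the user only sets CPU; everything else defaults to 100.
	thresholds := api.ResourceThresholds{v1.ResourceCPU: 20}
	targetThresholds := api.ResourceThresholds{}
	setDefaultForThresholds(thresholds, targetThresholds)
	// thresholds now: cpu=20, memory=100, pods=100
	// targetThresholds now: cpu=100, memory=100, pods=100
}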

@@ -48,6 +48,7 @@ func TestHighNodeUtilization(t *testing.T) {
testCases := []struct {
name string
thresholds api.ResourceThresholds
evictionModes []EvictionMode
nodes []*v1.Node
pods []*v1.Pod
expectedPodsEvicted uint
@@ -244,7 +245,7 @@ func TestHighNodeUtilization(t *testing.T) {
},
// All pods are assumed to be burstable (test.BuildTestPod always sets both cpu/memory resource requests to some value)
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
@@ -433,6 +434,53 @@ func TestHighNodeUtilization(t *testing.T) {
},
expectedPodsEvicted: 0,
},
{
name: "with extended resource threshold and no extended resource pods",
thresholds: api.ResourceThresholds{
extendedResource: 40,
},
evictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 10)
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 10)
}),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 10)
}),
},
pods: []*v1.Pod{
// pods on node1 have the extended resource
// request set and they put the node in the
// over utilization range.
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
}),
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 3)
}),
// pods on the other nodes must not be evicted
// because they do not have the extended
// resource defined in their requests.
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
},
expectedPodsEvicted: 0,
},
}

for _, testCase := range testCases {
@@ -474,10 +522,13 @@ func TestHighNodeUtilization(t *testing.T) {
})
}

plugin, err := NewHighNodeUtilization(&HighNodeUtilizationArgs{
Thresholds: testCase.thresholds,
},
handle)
plugin, err := NewHighNodeUtilization(
&HighNodeUtilizationArgs{
Thresholds: testCase.thresholds,
EvictionModes: testCase.evictionModes,
},
handle,
)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

@@ -21,7 +21,6 @@ import (
"fmt"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"

@@ -29,131 +28,241 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)

const LowNodeUtilizationPluginName = "LowNodeUtilization"

// LowNodeUtilization evicts pods from overutilized nodes to underutilized nodes. Note that CPU/Memory requests are used
// to calculate nodes' utilization and not the actual resource usage.

type LowNodeUtilization struct {
handle frameworktypes.Handle
args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
underutilizationCriteria []interface{}
overutilizationCriteria []interface{}
resourceNames []v1.ResourceName
usageClient usageClient
}

// this line makes sure that LowNodeUtilization implements the BalancePlugin
// interface.
var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}

// NewLowNodeUtilization builds plugin from its arguments while passing a handle
func NewLowNodeUtilization(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plugin, error) {
lowNodeUtilizationArgsArgs, ok := args.(*LowNodeUtilizationArgs)
// LowNodeUtilization evicts pods from overutilized nodes to underutilized
// nodes. Note that CPU/Memory requests are used to calculate nodes'
// utilization and not the actual resource usage.
type LowNodeUtilization struct {
handle frameworktypes.Handle
args *LowNodeUtilizationArgs
podFilter func(pod *v1.Pod) bool
underCriteria []any
overCriteria []any
resourceNames []v1.ResourceName
extendedResourceNames []v1.ResourceName
usageClient usageClient
}

// NewLowNodeUtilization builds plugin from its arguments while passing a
// handle. this plugin aims to move workload from overutilized nodes to
// underutilized nodes.
func NewLowNodeUtilization(
genericArgs runtime.Object, handle frameworktypes.Handle,
) (frameworktypes.Plugin, error) {
args, ok := genericArgs.(*LowNodeUtilizationArgs)
if !ok {
return nil, fmt.Errorf("want args to be of type LowNodeUtilizationArgs, got %T", args)
return nil, fmt.Errorf(
"want args to be of type LowNodeUtilizationArgs, got %T",
genericArgs,
)
}

setDefaultForLNUThresholds(lowNodeUtilizationArgsArgs.Thresholds, lowNodeUtilizationArgsArgs.TargetThresholds, lowNodeUtilizationArgsArgs.UseDeviationThresholds)
// resourceNames holds a list of resources for which the user has
// provided thresholds. extendedResourceNames holds those as well
// as cpu, memory and pods if no prometheus collection is used.
resourceNames := getResourceNames(args.Thresholds)
extendedResourceNames := resourceNames

underutilizationCriteria := []interface{}{
"CPU", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.Thresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.Thresholds {
if !nodeutil.IsBasicResource(name) {
underutilizationCriteria = append(underutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.Thresholds[name]))
// if we are using prometheus we need to validate we have everything we
// need. if we aren't then we need to make sure we are also collecting
// data for cpu, memory and pods.
metrics := args.MetricsUtilization
if metrics != nil && metrics.Source == api.PrometheusMetrics {
if err := validatePrometheusMetricsUtilization(args); err != nil {
return nil, err
}
} else {
extendedResourceNames = uniquifyResourceNames(
append(
resourceNames,
v1.ResourceCPU,
v1.ResourceMemory,
v1.ResourcePods,
),
)
}

overutilizationCriteria := []interface{}{
"CPU", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceCPU],
"Mem", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourceMemory],
"Pods", lowNodeUtilizationArgsArgs.TargetThresholds[v1.ResourcePods],
}
for name := range lowNodeUtilizationArgsArgs.TargetThresholds {
if !nodeutil.IsBasicResource(name) {
overutilizationCriteria = append(overutilizationCriteria, string(name), int64(lowNodeUtilizationArgsArgs.TargetThresholds[name]))
}
}

podFilter, err := podutil.NewOptions().
podFilter, err := podutil.
NewOptions().
WithFilter(handle.Evictor().Filter).
BuildFilterFunc()
if err != nil {
return nil, fmt.Errorf("error initializing pod filter function: %v", err)
}

resourceNames := getResourceNames(lowNodeUtilizationArgsArgs.Thresholds)

var usageClient usageClient
if lowNodeUtilizationArgsArgs.MetricsUtilization.MetricsServer {
if handle.MetricsCollector() == nil {
return nil, fmt.Errorf("metrics client not initialized")
// this plugin supports different ways of collecting usage data. each
// different way provides its own "usageClient". here we make sure we
// have the correct one or an error is triggered. XXX MetricsServer is
// deprecated, remove once dropped.
var usageClient usageClient = newRequestedUsageClient(
extendedResourceNames, handle.GetPodsAssignedToNodeFunc(),
)
if metrics != nil {
usageClient, err = usageClientForMetrics(args, handle, extendedResourceNames)
if err != nil {
return nil, err
}
usageClient = newActualUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc(), handle.MetricsCollector())
} else {
usageClient = newRequestedUsageClient(resourceNames, handle.GetPodsAssignedToNodeFunc())
}

return &LowNodeUtilization{
handle: handle,
args: lowNodeUtilizationArgsArgs,
underutilizationCriteria: underutilizationCriteria,
overutilizationCriteria: overutilizationCriteria,
resourceNames: resourceNames,
podFilter: podFilter,
usageClient: usageClient,
handle: handle,
args: args,
underCriteria: thresholdsToKeysAndValues(args.Thresholds),
overCriteria: thresholdsToKeysAndValues(args.TargetThresholds),
resourceNames: resourceNames,
extendedResourceNames: extendedResourceNames,
podFilter: podFilter,
usageClient: usageClient,
}, nil
}

// Name retrieves the plugin name
// Name retrieves the plugin name.
func (l *LowNodeUtilization) Name() string {
return LowNodeUtilizationPluginName
}

// Balance extension point implementation for the plugin
// Balance holds the main logic of the plugin. It evicts pods from over
// utilized nodes to under utilized nodes. The goal here is to evenly
// distribute pods across nodes.
func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
if err := l.usageClient.sync(nodes); err != nil {
if err := l.usageClient.sync(ctx, nodes); err != nil {
return &frameworktypes.Status{
Err: fmt.Errorf("error getting node usage: %v", err),
}
}

lowNodes, sourceNodes := classifyNodes(
getNodeUsage(nodes, l.usageClient),
getNodeThresholds(nodes, l.args.Thresholds, l.args.TargetThresholds, l.resourceNames, l.args.UseDeviationThresholds, l.usageClient),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
if nodeutil.IsNodeUnschedulable(node) {
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
// starts by taking a snapshot of the nodes' usage. we will use this
// snapshot to assess the nodes' usage and classify them as
// underutilized or overutilized.
nodesMap, nodesUsageMap, podListMap := getNodeUsageSnapshot(nodes, l.usageClient)
capacities := referencedResourceListForNodesCapacity(nodes)

// usage, by default, is exposed in absolute values. we need to normalize
// them (convert them to percentages) to be able to compare them with the
// user provided thresholds. thresholds are already provided in percentage
// in the <0; 100> interval.
var usage map[string]api.ResourceThresholds
var thresholds map[string][]api.ResourceThresholds
if l.args.UseDeviationThresholds {
// here the thresholds provided by the user represent
// deviations from the average so we need to treat them
// differently. when calculating the average we only
// need to consider the resources for which the user
// has provided thresholds.
usage, thresholds = assessNodesUsagesAndRelativeThresholds(
filterResourceNames(nodesUsageMap, l.resourceNames),
capacities,
l.args.Thresholds,
l.args.TargetThresholds,
)
} else {
usage, thresholds = assessNodesUsagesAndStaticThresholds(
nodesUsageMap,
capacities,
l.args.Thresholds,
l.args.TargetThresholds,
)
}
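Concretely, with deviation thresholds the user's numbers are offsets around the computed cluster average rather than absolute cut-offs. A worked example with illustrative numbers:

func exampleDeviationWindow() {
	// average CPU usage across nodes: 30%; Thresholds=5, TargetThresholds=5.
	average, dev := 30.0, 5.0
	low, high := average-dev, average+dev // 25% and 35%
	// nodes below 25% are underutilized, nodes above 35% are overutilized.
	fmt.Println(low, high)
}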

// classify nodes in under and over utilized. we will later try to move
// pods from the overutilized nodes to the underutilized ones.
nodeGroups := classifier.Classify(
usage, thresholds,
// underutilization criteria processing. nodes that are
// underutilized but aren't schedulable are ignored.
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
klog.V(2).InfoS(
"Node is unschedulable, thus not considered as underutilized",
"node", klog.KObj(nodesMap[nodeName]),
)
return false
}
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
return isNodeBelowThreshold(usage, threshold)
},
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
// overutilization criteria evaluation.
func(nodeName string, usage, threshold api.ResourceThresholds) bool {
return isNodeAboveThreshold(usage, threshold)
},
)

// log message for nodes with low utilization
klog.V(1).InfoS("Criteria for a node under utilization", l.underutilizationCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
// the nodeutilization package was designed to work with NodeInfo
// structs. these structs hold information about how utilized a node
// is. we need to go through the result of the classification and turn
// it into NodeInfo structs.
nodeInfos := make([][]NodeInfo, 2)
categories := []string{"underutilized", "overutilized"}
classifiedNodes := map[string]bool{}
for i := range nodeGroups {
for nodeName := range nodeGroups[i] {
classifiedNodes[nodeName] = true

// log message for over utilized nodes
klog.V(1).InfoS("Criteria for a node above target utilization", l.overutilizationCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(sourceNodes))
klog.InfoS(
"Node has been classified",
"category", categories[i],
"node", klog.KObj(nodesMap[nodeName]),
"usage", nodesUsageMap[nodeName],
"usagePercentage", normalizer.Round(usage[nodeName]),
)

nodeInfos[i] = append(nodeInfos[i], NodeInfo{
NodeUsage: NodeUsage{
node: nodesMap[nodeName],
usage: nodesUsageMap[nodeName],
allPods: podListMap[nodeName],
},
available: capNodeCapacitiesToThreshold(
nodesMap[nodeName],
thresholds[nodeName][1],
l.extendedResourceNames,
),
})
}
}

// log nodes that are appropriately utilized.
for nodeName := range nodesMap {
if !classifiedNodes[nodeName] {
klog.InfoS(
"Node is appropriately utilized",
"node", klog.KObj(nodesMap[nodeName]),
"usage", nodesUsageMap[nodeName],
"usagePercentage", normalizer.Round(usage[nodeName]),
)
}
}

lowNodes, highNodes := nodeInfos[0], nodeInfos[1]

// log messages for nodes with low and high utilization
klog.V(1).InfoS("Criteria for a node under utilization", l.underCriteria...)
klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
klog.V(1).InfoS("Criteria for a node above target utilization", l.overCriteria...)
klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(highNodes))

if len(lowNodes) == 0 {
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
klog.V(1).InfoS(
"No node is underutilized, nothing to do here, you might tune your thresholds further",
)
return nil
}

if len(lowNodes) <= l.args.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", l.args.NumberOfNodes)
klog.V(1).InfoS(
"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
"underutilizedNodes", len(lowNodes),
"numberOfNodes", l.args.NumberOfNodes,
)
return nil
}

@@ -162,14 +271,15 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
return nil
}

if len(sourceNodes) == 0 {
if len(highNodes) == 0 {
klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
return nil
}

// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
// this is a stop condition for the eviction process. we stop as soon
// as the node usage drops below the threshold.
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.available) {
return false
}
for name := range totalAvailableUsage {
@@ -181,52 +291,90 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
return true
}

// Sort the nodes by the usage in descending order
sortNodesByUsage(sourceNodes, false)
// sort the nodes by the usage in descending order
sortNodesByUsage(highNodes, false)

var nodeLimit *uint
if l.args.EvictionLimits != nil {
nodeLimit = l.args.EvictionLimits.Node
}

evictPodsFromSourceNodes(
ctx,
l.args.EvictableNamespaces,
sourceNodes,
highNodes,
lowNodes,
l.handle.Evictor(),
evictions.EvictOptions{StrategyName: LowNodeUtilizationPluginName},
l.podFilter,
l.resourceNames,
l.extendedResourceNames,
continueEvictionCond,
l.usageClient,
nodeLimit,
)

return nil
}

func setDefaultForLNUThresholds(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) {
// check if Pods/CPU/Mem are set, if not, set them to 100
if _, ok := thresholds[v1.ResourcePods]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourcePods] = MinResourcePercentage
targetThresholds[v1.ResourcePods] = MinResourcePercentage
} else {
thresholds[v1.ResourcePods] = MaxResourcePercentage
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
}
// validatePrometheusMetricsUtilization validates the Prometheus metrics
// utilization. XXX this should be done way earlier than this.
func validatePrometheusMetricsUtilization(args *LowNodeUtilizationArgs) error {
if args.MetricsUtilization.Prometheus == nil {
return fmt.Errorf("prometheus property is missing")
}
if _, ok := thresholds[v1.ResourceCPU]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceCPU] = MinResourcePercentage
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
} else {
thresholds[v1.ResourceCPU] = MaxResourcePercentage
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
}

if args.MetricsUtilization.Prometheus.Query == "" {
return fmt.Errorf("prometheus query is missing")
}
if _, ok := thresholds[v1.ResourceMemory]; !ok {
if useDeviationThresholds {
thresholds[v1.ResourceMemory] = MinResourcePercentage
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
} else {
thresholds[v1.ResourceMemory] = MaxResourcePercentage
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage

uResourceNames := getResourceNames(args.Thresholds)
oResourceNames := getResourceNames(args.TargetThresholds)
if len(uResourceNames) != 1 || uResourceNames[0] != MetricResource {
return fmt.Errorf(
"thresholds are expected to specify a single instance of %q resource, got %v instead",
MetricResource, uResourceNames,
)
}

if len(oResourceNames) != 1 || oResourceNames[0] != MetricResource {
return fmt.Errorf(
"targetThresholds are expected to specify a single instance of %q resource, got %v instead",
MetricResource, oResourceNames,
)
}

return nil
}
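Put together, an args value that passes this validation must use the MetricResource placeholder on both threshold sides and carry a non-empty query. A minimal sketch (the query name is just an example, mirroring the tests below):

func examplePrometheusArgs() error {
	args := &LowNodeUtilizationArgs{
		Thresholds:       api.ResourceThresholds{MetricResource: 30},
		TargetThresholds: api.ResourceThresholds{MetricResource: 50},
		MetricsUtilization: &MetricsUtilization{
			Source:     api.PrometheusMetrics,
			Prometheus: &Prometheus{Query: "instance:node_cpu:rate:sum"},
		},
	}
	return validatePrometheusMetricsUtilization(args) // nil for these values
}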

// usageClientForMetrics returns the correct usage client based on the
// metrics source. XXX MetricsServer is deprecated, remove once dropped.
func usageClientForMetrics(
args *LowNodeUtilizationArgs, handle frameworktypes.Handle, resources []v1.ResourceName,
) (usageClient, error) {
metrics := args.MetricsUtilization
switch {
case metrics.MetricsServer, metrics.Source == api.KubernetesMetrics:
if handle.MetricsCollector() == nil {
return nil, fmt.Errorf("metrics client not initialized")
}
return newActualUsageClient(
resources,
handle.GetPodsAssignedToNodeFunc(),
handle.MetricsCollector(),
), nil

case metrics.Source == api.PrometheusMetrics:
if handle.PrometheusClient() == nil {
return nil, fmt.Errorf("prometheus client not initialized")
}
return newPrometheusUsageClient(
handle.GetPodsAssignedToNodeFunc(),
handle.PrometheusClient(),
metrics.Prometheus.Query,
), nil
case metrics.Source != "":
return nil, fmt.Errorf("unrecognized metrics source")
default:
return nil, fmt.Errorf("metrics source is empty")
}
}
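The constructor above only reaches this helper when MetricsUtilization is non-nil; otherwise the requests-based client stays in place. A sketch of that dispatch as a hypothetical wrapper (the function name is illustrative):

func pickUsageClient(
	args *LowNodeUtilizationArgs, handle frameworktypes.Handle, names []v1.ResourceName,
) (usageClient, error) {
	// requests-based by default, swapped for a metrics-backed client
	// only when a metrics source is configured.
	var client usageClient = newRequestedUsageClient(names, handle.GetPodsAssignedToNodeFunc())
	if args.MetricsUtilization != nil {
		var err error
		if client, err = usageClientForMetrics(args, handle, names); err != nil {
			return nil, err
		}
	}
	return client, nil
}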

@@ -31,6 +31,7 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
"k8s.io/utils/ptr"

"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
@@ -40,6 +41,8 @@ import (
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"

"github.com/prometheus/common/model"
)

func TestLowNodeUtilization(t *testing.T) {
@@ -63,6 +66,7 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsWithMetricsEvicted uint
evictedPods []string
evictableNamespaces *api.Namespaces
evictionLimits *api.EvictionLimits
}{
{
name: "no evictable pods",
@@ -712,6 +716,60 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "with extended resource in some of nodes with deviation",
thresholds: api.ResourceThresholds{
v1.ResourcePods: 5,
extendedResource: 10,
},
targetThresholds: api.ResourceThresholds{
v1.ResourcePods: 5,
extendedResource: 10,
},
useDeviationThresholds: true,
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 0, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with extended resource.
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p2", 0, 0, n2NodeName, func(pod *v1.Pod) {
// A pod with extended resource.
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 7)
}),
test.BuildTestPod("p3", 0, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p8", 0, 0, n3NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
test.BuildTestPod("p9", 0, 0, n3NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 1,
expectedPodsWithMetricsEvicted: 0,
},
{
name: "without priorities, but only other node is unschedulable",
thresholds: api.ResourceThresholds{
@@ -1018,6 +1076,79 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsWithMetricsEvicted: 2,
evictedPods: []string{},
},
{
name: "deviation thresholds and overevicting memory",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 5,
v1.ResourcePods: 5,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 5,
v1.ResourcePods: 5,
},
useDeviationThresholds: true,
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
// totalcpuusage = 3600m, avgcpuusage = 3600/12000 = 0.3 => 30%
// totalpodsusage = 9, avgpodsusage = 9/30 = 0.3 => 30%
// n1 and n2 are fully memory utilized
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 375, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 375, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 375, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 375, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 375, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 3000, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 4000, 3000),
test.BuildNodeMetrics(n2NodeName, 4000, 3000),
test.BuildNodeMetrics(n3NodeName, 4000, 3000),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 400, 375),
test.BuildPodMetrics("p2", 400, 375),
test.BuildPodMetrics("p3", 400, 375),
test.BuildPodMetrics("p4", 400, 375),
test.BuildPodMetrics("p5", 400, 375),
test.BuildPodMetrics("p6", 400, 375),
test.BuildPodMetrics("p7", 400, 375),
test.BuildPodMetrics("p8", 400, 375),
test.BuildPodMetrics("p9", 400, 3000),
},
expectedPodsEvicted: 0,
expectedPodsWithMetricsEvicted: 0,
evictedPods: []string{},
},
{
name: "without priorities different evictions for requested and actual resources",
thresholds: api.ResourceThresholds{
@@ -1122,6 +1253,72 @@ func TestLowNodeUtilization(t *testing.T) {
expectedPodsEvicted: 3,
expectedPodsWithMetricsEvicted: 2,
},
{
name: "without priorities with node eviction limit",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
targetThresholds: api.ResourceThresholds{
v1.ResourceCPU: 50,
v1.ResourcePods: 50,
},
evictionLimits: &api.EvictionLimits{
Node: ptr.To[uint](2),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
nodemetricses: []*v1beta1.NodeMetrics{
test.BuildNodeMetrics(n1NodeName, 3201, 0),
test.BuildNodeMetrics(n2NodeName, 401, 0),
test.BuildNodeMetrics(n3NodeName, 11, 0),
},
podmetricses: []*v1beta1.PodMetrics{
test.BuildPodMetrics("p1", 401, 0),
test.BuildPodMetrics("p2", 401, 0),
test.BuildPodMetrics("p3", 401, 0),
test.BuildPodMetrics("p4", 401, 0),
test.BuildPodMetrics("p5", 401, 0),
},
expectedPodsEvicted: 2,
expectedPodsWithMetricsEvicted: 2,
},
}

for _, tc := range testCases {
@@ -1189,14 +1386,18 @@ func TestLowNodeUtilization(t *testing.T) {
}
handle.MetricsCollectorImpl = collector

var metricsUtilization *MetricsUtilization
if metricsEnabled {
metricsUtilization = &MetricsUtilization{Source: api.KubernetesMetrics}
}

plugin, err := NewLowNodeUtilization(&LowNodeUtilizationArgs{
Thresholds: tc.thresholds,
TargetThresholds: tc.targetThresholds,
UseDeviationThresholds: tc.useDeviationThresholds,
EvictionLimits: tc.evictionLimits,
EvictableNamespaces: tc.evictableNamespaces,
MetricsUtilization: MetricsUtilization{
MetricsServer: metricsEnabled,
},
MetricsUtilization: metricsUtilization,
},
handle)
if err != nil {
@@ -1370,3 +1571,278 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
})
}
}

func withLocalStorage(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}

func withCriticalPod(pod *v1.Pod) {
// A Critical Pod.
test.SetNormalOwnerRef(pod)
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}
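These two helpers keep the Prometheus test table below compact; they compose with test.BuildTestPod as plain pod mutators. An illustrative usage:

func examplePodHelpers() {
	pod := test.BuildTestPod("p7", 400, 0, "n1", withLocalStorage)
	_ = pod // now carries a local-storage volume and a mirror-pod annotation
}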

func TestLowNodeUtilizationWithPrometheusMetrics(t *testing.T) {
n1NodeName := "n1"
n2NodeName := "n2"
n3NodeName := "n3"

testCases := []struct {
name string
samples model.Vector
nodes []*v1.Node
pods []*v1.Pod
expectedPodsEvicted uint
evictedPods []string
args *LowNodeUtilizationArgs
}{
{
name: "with instance:node_cpu:rate:sum query",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
MetricResource: 30,
},
TargetThresholds: api.ResourceThresholds{
MetricResource: 50,
},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 1,
},
{
name: "with instance:node_cpu:rate:sum query with more evictions",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
MetricResource: 30,
},
TargetThresholds: api.ResourceThresholds{
MetricResource: 50,
},
EvictionLimits: &api.EvictionLimits{
Node: ptr.To[uint](3),
},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 3,
},
{
name: "with instance:node_cpu:rate:sum query with deviation",
args: &LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
MetricResource: 5,
},
TargetThresholds: api.ResourceThresholds{
MetricResource: 5,
},
EvictionLimits: &api.EvictionLimits{
Node: ptr.To[uint](2),
},
UseDeviationThresholds: true,
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 0.5695757575757561),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.4245454545454522),
sample("instance:node_cpu:rate:sum", n3NodeName, 0.20381818181818104),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 2,
},
{
name: "with instance:node_cpu:rate:sum query and deviation thresholds",
args: &LowNodeUtilizationArgs{
UseDeviationThresholds: true,
Thresholds: api.ResourceThresholds{MetricResource: 10},
TargetThresholds: api.ResourceThresholds{MetricResource: 10},
MetricsUtilization: &MetricsUtilization{
Source: api.PrometheusMetrics,
Prometheus: &Prometheus{
Query: "instance:node_cpu:rate:sum",
},
},
},
samples: model.Vector{
sample("instance:node_cpu:rate:sum", n1NodeName, 1),
sample("instance:node_cpu:rate:sum", n2NodeName, 0.5),
sample("instance:node_cpu:rate:sum", n3NodeName, 0),
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p7", 400, 0, n1NodeName, withLocalStorage),
test.BuildTestPod("p8", 400, 0, n1NodeName, withCriticalPod),
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
expectedPodsEvicted: 1,
},
}

for _, tc := range testCases {
testFnc := func(metricsEnabled bool, expectedPodsEvicted uint) func(t *testing.T) {
return func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}

fakeClient := fake.NewSimpleClientset(objs...)

podsForEviction := make(map[string]struct{})
for _, pod := range tc.evictedPods {
podsForEviction[pod] = struct{}{}
}

evictionFailed := false
if len(tc.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
if eviction, ok := obj.(*policy.Eviction); ok {
if _, exists := podsForEviction[eviction.Name]; exists {
return true, obj, nil
}
evictionFailed = true
return true, nil, fmt.Errorf("pod %q was unexpectedly evicted", eviction.Name)
}
return true, obj, nil
})
}

handle, podEvictor, err := frameworktesting.InitFrameworkHandle(ctx, fakeClient, nil, defaultevictor.DefaultEvictorArgs{NodeFit: true}, nil)
if err != nil {
t.Fatalf("Unable to initialize a framework handle: %v", err)
}

handle.PrometheusClientImpl = &fakePromClient{
result: tc.samples,
dataType: model.ValVector,
}
plugin, err := NewLowNodeUtilization(tc.args, handle)
if err != nil {
t.Fatalf("Unable to initialize the plugin: %v", err)
}

status := plugin.(frameworktypes.BalancePlugin).Balance(ctx, tc.nodes)
if status != nil {
t.Fatalf("Balance.err: %v", status.Err)
}

podsEvicted := podEvictor.TotalEvicted()
if expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
}
}
}
t.Run(tc.name, testFnc(false, tc.expectedPodsEvicted))
}
}

@@ -18,7 +18,9 @@ package nodeutilization

import (
"context"
"math"
"fmt"
"maps"
"slices"
"sort"

"sigs.k8s.io/descheduler/pkg/api"
@@ -27,188 +29,122 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/descheduler/pod"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/utils"
)

// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
type NodeUsage struct {
node *v1.Node
usage map[v1.ResourceName]*resource.Quantity
allPods []*v1.Pod
}

type NodeThresholds struct {
lowResourceThreshold map[v1.ResourceName]*resource.Quantity
highResourceThreshold map[v1.ResourceName]*resource.Quantity
}

type NodeInfo struct {
NodeUsage
thresholds NodeThresholds
}

type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool

// NodePodsMap is a set of (node, pods) pairs
type NodePodsMap map[*v1.Node][]*v1.Pod
// []NodeUsage is a snapshot, so allPods can not be read any time to avoid
// breaking consistency between the node's actual usage and available pods.
//
// New data model:
// - node usage: map[string]api.ReferencedResourceList
// - thresholds: map[string]api.ReferencedResourceList
// - all pods: map[string][]*v1.Pod
//
// After classification:
// - each group will have its own (smaller) node usage and thresholds and
// allPods.
//
// Both node usage and thresholds are needed to compute the remaining resources
// that can be evicted/can accept evicted pods.
//
// 1. translate node usages into percentages as float or int64 (how much
// precision is lost? maybe use BigInt?).
// 2. produce thresholds (if they need to be computed, otherwise use user
// provided, they are already in percentages).
// 3. classify nodes into groups.
// 4. produce a list of nodes (sorted as before) that have the node usage,
// the threshold (only one this time) and the snapshotted pod list
// present.
//
// Data wise
// Produce separated maps for:
// - nodes: map[string]*v1.Node
// - node usage: map[string]api.ReferencedResourceList
// - thresholds: map[string][]api.ReferencedResourceList
// - pod list: map[string][]*v1.Pod
//
// Once the nodes are classified produce the original []NodeInfo so the code is
// not that much changed (postponing further refactoring until it is needed).

const (
// MetricResource is a special resource name we use to keep track of a
// metric obtained from a third party entity.
MetricResource = v1.ResourceName("MetricResource")
// MinResourcePercentage is the minimum value of a resource's percentage
MinResourcePercentage = 0
// MaxResourcePercentage is the maximum value of a resource's percentage
MaxResourcePercentage = 100
)
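As a sketch, the separated per-node maps listed in the data-model comment above line up like this (values illustrative, assuming api.ReferencedResourceList is a map of resource names to *resource.Quantity as its usage elsewhere implies):

func exampleDataModel() {
	nodes := map[string]*v1.Node{"n1": {}}
	usage := map[string]api.ReferencedResourceList{
		"n1": {v1.ResourceCPU: resource.NewMilliQuantity(1200, resource.DecimalSI)},
	}
	thresholds := map[string][]api.ReferencedResourceList{} // [low, high] per node
	pods := map[string][]*v1.Pod{"n1": nil}
	_, _, _, _ = nodes, usage, thresholds, pods
}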

func normalizePercentage(percent api.Percentage) api.Percentage {
if percent > MaxResourcePercentage {
return MaxResourcePercentage
}
if percent < MinResourcePercentage {
return MinResourcePercentage
}
return percent
// NodeUsage stores a node's info, pods on it, thresholds and its resource
// usage.
type NodeUsage struct {
node *v1.Node
usage api.ReferencedResourceList
allPods []*v1.Pod
}

func getNodeThresholds(
nodes []*v1.Node,
lowThreshold, highThreshold api.ResourceThresholds,
resourceNames []v1.ResourceName,
useDeviationThresholds bool,
usageClient usageClient,
) map[string]NodeThresholds {
nodeThresholdsMap := map[string]NodeThresholds{}

averageResourceUsagePercent := api.ResourceThresholds{}
if useDeviationThresholds {
averageResourceUsagePercent = averageNodeBasicresources(nodes, usageClient)
}

for _, node := range nodes {
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
}

nodeThresholdsMap[node.Name] = NodeThresholds{
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
}

for _, resourceName := range resourceNames {
if useDeviationThresholds {
cap := nodeCapacity[resourceName]
if lowThreshold[resourceName] == MinResourcePercentage {
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = &cap
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = &cap
} else {
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]-lowThreshold[resourceName]))
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]+highThreshold[resourceName]))
}
} else {
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, lowThreshold[resourceName])
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, highThreshold[resourceName])
}
}

}
return nodeThresholdsMap
// NodeInfo is an entity we use to gather information about a given node. here
// we have its resource usage as well as the amount of available resources.
// we use this struct to carry information around and to make it easier to
// process.
type NodeInfo struct {
NodeUsage
available api.ReferencedResourceList
}

func getNodeUsage(
// continueEvictionCond is a function that determines if we should keep
// evicting pods or not.
type continueEvictionCond func(NodeInfo, api.ReferencedResourceList) bool

// getNodeUsageSnapshot separates the snapshot into easily accessible data
// chunks so the node usage can be processed separately. returns a map of
// nodes, a map of their usage and a map of their pods. maps are indexed
// by node name.
func getNodeUsageSnapshot(
nodes []*v1.Node,
usageClient usageClient,
) []NodeUsage {
var nodeUsageList []NodeUsage
) (
map[string]*v1.Node,
map[string]api.ReferencedResourceList,
map[string][]*v1.Pod,
) {
// XXX node usage needs to be kept in the original resource quantity
// since converting to percentages and back is losing precision.
nodesUsageMap := make(map[string]api.ReferencedResourceList)
podListMap := make(map[string][]*v1.Pod)
nodesMap := make(map[string]*v1.Node)

for _, node := range nodes {
nodeUsageList = append(nodeUsageList, NodeUsage{
node: node,
usage: usageClient.nodeUtilization(node.Name),
allPods: usageClient.pods(node.Name),
})
nodesMap[node.Name] = node
nodesUsageMap[node.Name] = usageClient.nodeUtilization(node.Name)
podListMap[node.Name] = usageClient.pods(node.Name)
}

return nodeUsageList
return nodesMap, nodesUsageMap, podListMap
}
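Callers unpack the three maps by node name, which is what both Balance implementations above do. A short consumption sketch using the function as defined:

func exampleSnapshot(nodes []*v1.Node, client usageClient) {
	nodesMap, usageMap, podsMap := getNodeUsageSnapshot(nodes, client)
	for name, node := range nodesMap {
		klog.InfoS("snapshot", "node", klog.KObj(node),
			"usage", usageMap[name], "pods", len(podsMap[name]))
	}
}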
|
||||
|
||||
func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceName, threshold api.Percentage) *resource.Quantity {
defaultFormat := resource.DecimalSI
if resourceName == v1.ResourceMemory {
defaultFormat = resource.BinarySI
// thresholdsToKeysAndValues converts a ResourceThresholds into a list of keys
// and values. this is useful for logging.
func thresholdsToKeysAndValues(thresholds api.ResourceThresholds) []any {
result := []any{}
for name, value := range thresholds {
result = append(result, name, fmt.Sprintf("%.2f%%", value))
}

resourceCapacityFraction := func(resourceNodeCapacity int64) int64 {
// A threshold is in percentages but in <0;100> interval.
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
// Multiplying it with capacity will give fraction of the capacity corresponding to the given resource threshold in Quantity units.
return int64(float64(threshold) * 0.01 * float64(resourceNodeCapacity))
}

resourceCapacityQuantity := nodeCapacity.Name(resourceName, defaultFormat)

if resourceName == v1.ResourceCPU {
return resource.NewMilliQuantity(resourceCapacityFraction(resourceCapacityQuantity.MilliValue()), defaultFormat)
}
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
return result
}

func roundTo2Decimals(percentage float64) float64 {
return math.Round(percentage*100) / 100
}

func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
nodeCapacity := nodeUsage.node.Status.Capacity
if len(nodeUsage.node.Status.Allocatable) > 0 {
nodeCapacity = nodeUsage.node.Status.Allocatable
}

resourceUsagePercentage := map[v1.ResourceName]float64{}
for resourceName, resourceUsage := range nodeUsage.usage {
cap := nodeCapacity[resourceName]
if !cap.IsZero() {
resourceUsagePercentage[resourceName] = 100 * float64(resourceUsage.MilliValue()) / float64(cap.MilliValue())
resourceUsagePercentage[resourceName] = roundTo2Decimals(resourceUsagePercentage[resourceName])
}
}

return resourceUsagePercentage
}

// classifyNodes classifies the nodes into low-utilization or high-utilization nodes. If a node lies between
// low and high thresholds, it is simply ignored.
func classifyNodes(
nodeUsages []NodeUsage,
nodeThresholds map[string]NodeThresholds,
lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool,
) ([]NodeInfo, []NodeInfo) {
lowNodes, highNodes := []NodeInfo{}, []NodeInfo{}

for _, nodeUsage := range nodeUsages {
nodeInfo := NodeInfo{
NodeUsage: nodeUsage,
thresholds: nodeThresholds[nodeUsage.node.Name],
}
if lowThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
klog.InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
lowNodes = append(lowNodes, nodeInfo)
} else if highThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
klog.InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
highNodes = append(highNodes, nodeInfo)
} else {
klog.InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
}
}

return lowNodes, highNodes
}

func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
// log message in one line
keysAndValues := []interface{}{}
// usageToKeysAndValues converts a ReferencedResourceList into a list of
// keys and values. this is useful for logging.
func usageToKeysAndValues(usage api.ReferencedResourceList) []any {
keysAndValues := []any{}
if quantity, exists := usage[v1.ResourceCPU]; exists {
keysAndValues = append(keysAndValues, "CPU", quantity.MilliValue())
}
@@ -220,15 +156,14 @@ func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interf
}
for name := range usage {
if !nodeutil.IsBasicResource(name) {
keysAndValues = append(keysAndValues, string(name), usage[name].Value())
keysAndValues = append(keysAndValues, name, usage[name].Value())
}
}
return keysAndValues
}

// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on the node have priority, if not
// evicts them based on QoS as fallback option.
// TODO: @ravig Break this function into smaller functions.
// evictPodsFromSourceNodes evicts pods based on priority, if all the pods on
// the node have priority, if not evicts them based on QoS as fallback option.
func evictPodsFromSourceNodes(
ctx context.Context,
evictableNamespaces *api.Namespaces,
@@ -239,49 +174,67 @@ func evictPodsFromSourceNodes(
resourceNames []v1.ResourceName,
continueEviction continueEvictionCond,
usageClient usageClient,
maxNoOfPodsToEvictPerNode *uint,
) {
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
for _, resourceName := range resourceNames {
totalAvailableUsage[resourceName] = &resource.Quantity{}
available, err := assessAvailableResourceInNodes(destinationNodes, resourceNames)
if err != nil {
klog.ErrorS(err, "unable to assess available resources in nodes")
return
}

taintsOfDestinationNodes := make(map[string][]v1.Taint, len(destinationNodes))
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(available)...)

destinationTaints := make(map[string][]v1.Taint, len(destinationNodes))
for _, node := range destinationNodes {
taintsOfDestinationNodes[node.node.Name] = node.node.Spec.Taints

for _, name := range resourceNames {
if _, exists := node.usage[name]; !exists {
klog.Errorf("unable to find %q resource in node's %q usage, terminating eviction", name, node.node.Name)
return
}
if _, ok := totalAvailableUsage[name]; !ok {
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
}
totalAvailableUsage[name].Add(*node.thresholds.highResourceThreshold[name])
totalAvailableUsage[name].Sub(*node.usage[name])
}
destinationTaints[node.node.Name] = node.node.Spec.Taints
}

// log message in one line
klog.V(1).InfoS("Total capacity to be moved", usageToKeysAndValues(totalAvailableUsage)...)

for _, node := range sourceNodes {
klog.V(3).InfoS("Evicting pods from node", "node", klog.KObj(node.node), "usage", node.usage)
klog.V(3).InfoS(
"Evicting pods from node",
"node", klog.KObj(node.node),
"usage", node.usage,
)

nonRemovablePods, removablePods := classifyPods(node.allPods, podFilter)
klog.V(2).InfoS("Pods on node", "node", klog.KObj(node.node), "allPods", len(node.allPods), "nonRemovablePods", len(nonRemovablePods), "removablePods", len(removablePods))
klog.V(2).InfoS(
"Pods on node",
"node", klog.KObj(node.node),
"allPods", len(node.allPods),
"nonRemovablePods", len(nonRemovablePods),
"removablePods", len(removablePods),
)

if len(removablePods) == 0 {
klog.V(1).InfoS("No removable pods on node, try next node", "node", klog.KObj(node.node))
klog.V(1).InfoS(
"No removable pods on node, try next node",
"node", klog.KObj(node.node),
)
continue
}

klog.V(1).InfoS("Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers")
// sort the evictable Pods based on priority. This also sorts them based on QoS. If there are multiple pods with same priority, they are sorted based on QoS tiers.
klog.V(1).InfoS(
"Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers",
)

// sort the evictable Pods based on priority. This also sorts
// them based on QoS. If there are multiple pods with same
// priority, they are sorted based on QoS tiers.
podutil.SortPodsBasedOnPriorityLowToHigh(removablePods)
err := evictPods(ctx, evictableNamespaces, removablePods, node, totalAvailableUsage, taintsOfDestinationNodes, podEvictor, evictOptions, continueEviction, usageClient)
if err != nil {

if err := evictPods(
ctx,
evictableNamespaces,
removablePods,
node,
available,
destinationTaints,
podEvictor,
evictOptions,
continueEviction,
usageClient,
maxNoOfPodsToEvictPerNode,
); err != nil {
switch err.(type) {
case *evictions.EvictionTotalLimitError:
return
@@ -291,83 +244,136 @@ func evictPodsFromSourceNodes(
}
}

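The continueEviction callback is what lets different plugins reuse this loop. A minimal sketch of one possible condition, assuming we only track cluster-wide headroom (real plugins also re-check node-level thresholds):

// illustrative only, not the actual LowNodeUtilization condition.
var keepEvicting continueEvictionCond = func(node NodeInfo, available api.ReferencedResourceList) bool {
for _, quantity := range available {
// stop as soon as any tracked resource runs out of headroom.
if quantity == nil || quantity.CmpInt64(0) <= 0 {
return false
}
}
return true
}
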
// evictPods keeps evicting pods until the continueEviction function returns
// false or we can't or shouldn't evict any more pods. available node resources
// are updated after each eviction.
func evictPods(
ctx context.Context,
evictableNamespaces *api.Namespaces,
inputPods []*v1.Pod,
nodeInfo NodeInfo,
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
taintsOfLowNodes map[string][]v1.Taint,
totalAvailableUsage api.ReferencedResourceList,
destinationTaints map[string][]v1.Taint,
podEvictor frameworktypes.Evictor,
evictOptions evictions.EvictOptions,
continueEviction continueEvictionCond,
usageClient usageClient,
maxNoOfPodsToEvictPerNode *uint,
) error {
// preemptive check to see if we should continue evicting pods.
if !continueEviction(nodeInfo, totalAvailableUsage) {
return nil
}

// some namespaces can be excluded from the eviction process.
var excludedNamespaces sets.Set[string]
if evictableNamespaces != nil {
excludedNamespaces = sets.New(evictableNamespaces.Exclude...)
}

if continueEviction(nodeInfo, totalAvailableUsage) {
for _, pod := range inputPods {
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
continue
}
var evictionCounter uint = 0
for _, pod := range inputPods {
if maxNoOfPodsToEvictPerNode != nil && evictionCounter >= *maxNoOfPodsToEvictPerNode {
klog.V(3).InfoS(
"Max number of evictions per node per plugin reached",
"limit", *maxNoOfPodsToEvictPerNode,
)
break
}

preEvictionFilterWithOptions, err := podutil.NewOptions().
WithFilter(podEvictor.PreEvictionFilter).
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "could not build preEvictionFilter with namespace exclusion")
continue
}
if !utils.PodToleratesTaints(pod, destinationTaints) {
klog.V(3).InfoS(
"Skipping eviction for pod, doesn't tolerate node taint",
"pod", klog.KObj(pod),
)
continue
}

if !preEvictionFilterWithOptions(pod) {
continue
}
podUsage, err := usageClient.podUsage(pod)
if err != nil {
klog.Errorf("unable to get pod usage for %v/%v: %v", pod.Namespace, pod.Name, err)
continue
}
err = podEvictor.Evict(ctx, pod, evictOptions)
if err == nil {
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
// verify if we can evict the pod based on the pod evictor
// filter and on the excluded namespaces.
preEvictionFilterWithOptions, err := podutil.
NewOptions().
WithFilter(podEvictor.PreEvictionFilter).
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "could not build preEvictionFilter with namespace exclusion")
continue
}

for name := range totalAvailableUsage {
if name == v1.ResourcePods {
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
} else {
nodeInfo.usage[name].Sub(*podUsage[name])
totalAvailableUsage[name].Sub(*podUsage[name])
}
}
if !preEvictionFilterWithOptions(pod) {
continue
}

keysAndValues := []interface{}{
"node", nodeInfo.node.Name,
}
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
klog.V(3).InfoS("Updated node usage", keysAndValues...)
// check if pods can still be evicted
if !continueEviction(nodeInfo, totalAvailableUsage) {
break
}
// in case podUsage does not support resource counting (e.g.
// provided metric does not quantify pod resource utilization).
unconstrainedResourceEviction := false
podUsage, err := usageClient.podUsage(pod)
if err != nil {
if _, ok := err.(*notSupportedError); !ok {
klog.Errorf(
"unable to get pod usage for %v/%v: %v",
pod.Namespace, pod.Name, err,
)
continue
}
unconstrainedResourceEviction = true
}

if err := podEvictor.Evict(ctx, pod, evictOptions); err != nil {
switch err.(type) {
case *evictions.EvictionNodeLimitError, *evictions.EvictionTotalLimitError:
return err
default:
klog.Errorf("eviction failed: %v", err)
continue
}
}

if maxNoOfPodsToEvictPerNode == nil && unconstrainedResourceEviction {
klog.V(3).InfoS("Currently, only a single pod eviction is allowed")
break
}

evictionCounter++
klog.V(3).InfoS("Evicted pods", "pod", klog.KObj(pod))
if unconstrainedResourceEviction {
continue
}

subtractPodUsageFromNodeAvailability(totalAvailableUsage, &nodeInfo, podUsage)

keysAndValues := []any{"node", nodeInfo.node.Name}
keysAndValues = append(keysAndValues, usageToKeysAndValues(nodeInfo.usage)...)
klog.V(3).InfoS("Updated node usage", keysAndValues...)

// make sure we should continue evicting pods.
if !continueEviction(nodeInfo, totalAvailableUsage) {
break
}
}
return nil
}

// subtractPodUsageFromNodeAvailability subtracts the pod usage from the node
// available resources. this is done to keep track of the remaining resources
// that can be used to move pods around.
func subtractPodUsageFromNodeAvailability(
available api.ReferencedResourceList,
nodeInfo *NodeInfo,
podUsage api.ReferencedResourceList,
) {
for name := range available {
if name == v1.ResourcePods {
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
available[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
continue
}
nodeInfo.usage[name].Sub(*podUsage[name])
available[name].Sub(*podUsage[name])
}
}

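A worked example of the bookkeeping, with invented quantities: evicting a pod that uses 500m CPU from a node currently using 3000m, with 1500m of cluster-wide headroom left, should end at 2500m usage and 1000m headroom.

// illustrative arithmetic only; the values are made up.
nodeInfo := NodeInfo{NodeUsage: NodeUsage{usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(3000, resource.DecimalSI),
}}}
available := api.ReferencedResourceList{v1.ResourceCPU: resource.NewMilliQuantity(1500, resource.DecimalSI)}
podUsage := api.ReferencedResourceList{v1.ResourceCPU: resource.NewMilliQuantity(500, resource.DecimalSI)}
subtractPodUsageFromNodeAvailability(available, &nodeInfo, podUsage)
// nodeInfo.usage[cpu] is now 2500m and available[cpu] is 1000m; for
// v1.ResourcePods the helper subtracts exactly one per eviction instead.
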
// sortNodesByUsage sorts nodes based on usage according to the given plugin.
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
sort.Slice(nodes, func(i, j int) bool {
@@ -400,7 +406,7 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {

// isNodeAboveTargetUtilization checks if a node is overutilized
// At least one resource has to be above the high threshold
func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
func isNodeAboveTargetUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
for name, nodeValue := range usage.usage {
// usage.highResourceThreshold[name] < nodeValue
if threshold[name].Cmp(*nodeValue) == -1 {
@@ -410,16 +416,25 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName
return false
}

// isNodeWithLowUtilization checks if a node is underutilized
// All resources have to be below the low threshold
func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
for name, nodeValue := range usage.usage {
// usage.lowResourceThreshold[name] < nodeValue
if threshold[name].Cmp(*nodeValue) == -1 {
// isNodeAboveThreshold checks if a node is over a threshold
// At least one resource has to be above the threshold
func isNodeAboveThreshold(usage, threshold api.ResourceThresholds) bool {
for name := range threshold {
if threshold[name] < usage[name] {
return true
}
}
return false
}

// isNodeBelowThreshold checks if a node is under a threshold
// All resources have to be below the threshold
func isNodeBelowThreshold(usage, threshold api.ResourceThresholds) bool {
for name := range threshold {
if threshold[name] < usage[name] {
return false
}
}

return true
}

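A quick worked example of the two predicates with made-up percentages:

usage := api.ResourceThresholds{v1.ResourceCPU: 60, v1.ResourceMemory: 40}
high := api.ResourceThresholds{v1.ResourceCPU: 50, v1.ResourceMemory: 50}
low := api.ResourceThresholds{v1.ResourceCPU: 45, v1.ResourceMemory: 45}
_ = isNodeAboveThreshold(usage, high) // true: cpu at 60% exceeds the 50% threshold
_ = isNodeBelowThreshold(usage, low) // false: cpu at 60% is not below 45%
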
@@ -432,6 +447,8 @@ func getResourceNames(thresholds api.ResourceThresholds) []v1.ResourceName {
return resourceNames
}

// classifyPods classifies pods into two lists: removable and non-removable.
// Removable pods are those that can be evicted.
func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {
var nonRemovablePods, removablePods []*v1.Pod

@@ -446,27 +463,309 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
return nonRemovablePods, removablePods
}

func averageNodeBasicresources(nodes []*v1.Node, usageClient usageClient) api.ResourceThresholds {
total := api.ResourceThresholds{}
average := api.ResourceThresholds{}
numberOfNodes := len(nodes)
// assessNodesUsagesAndStaticThresholds converts the raw usage data into
// percentage. Returns the usage (pct) and the thresholds (pct) for each
// node.
func assessNodesUsagesAndStaticThresholds(
rawUsages, rawCapacities map[string]api.ReferencedResourceList,
lowSpan, highSpan api.ResourceThresholds,
) (map[string]api.ResourceThresholds, map[string][]api.ResourceThresholds) {
// first we normalize the node usage from the raw data (Mi, Gi, etc)
// into api.Percentage values.
usage := normalizer.Normalize(
rawUsages, rawCapacities, ResourceUsageToResourceThreshold,
)

// since we are not taking the average and applying deviations to it,
// we can simply replicate the same threshold across all nodes and return.
thresholds := normalizer.Replicate(
slices.Collect(maps.Keys(usage)),
[]api.ResourceThresholds{lowSpan, highSpan},
)
return usage, thresholds
}

// assessNodesUsagesAndRelativeThresholds converts the raw usage data into
// percentage. Thresholds are calculated based on the average usage. Returns
// the usage (pct) and the thresholds (pct) for each node.
func assessNodesUsagesAndRelativeThresholds(
rawUsages, rawCapacities map[string]api.ReferencedResourceList,
lowSpan, highSpan api.ResourceThresholds,
) (map[string]api.ResourceThresholds, map[string][]api.ResourceThresholds) {
// first we normalize the node usage from the raw data (Mi, Gi, etc)
// into api.Percentage values.
usage := normalizer.Normalize(
rawUsages, rawCapacities, ResourceUsageToResourceThreshold,
)

// calculate the average usage.
average := normalizer.Average(usage)
klog.V(3).InfoS(
"Assessed average usage",
thresholdsToKeysAndValues(average)...,
)

// decrease the provided threshold from the average to get the low
// span. also make sure the resulting values are between 0 and 100.
lowerThresholds := normalizer.Clamp(
normalizer.Sum(average, normalizer.Negate(lowSpan)), 0, 100,
)
klog.V(3).InfoS(
"Assessed thresholds for underutilized nodes",
thresholdsToKeysAndValues(lowerThresholds)...,
)

// increase the provided threshold from the average to get the high
// span. also make sure the resulting values are between 0 and 100.
higherThresholds := normalizer.Clamp(
normalizer.Sum(average, highSpan), 0, 100,
)
klog.V(3).InfoS(
"Assessed thresholds for overutilized nodes",
thresholdsToKeysAndValues(higherThresholds)...,
)

// replicate the same assessed thresholds to all nodes.
thresholds := normalizer.Replicate(
slices.Collect(maps.Keys(usage)),
[]api.ResourceThresholds{lowerThresholds, higherThresholds},
)

return usage, thresholds
}

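To make the deviation math concrete with invented numbers: if the normalized usage averages 50% and the operator asks for a 5-point span on each side, the derived limits are clamp(50 - 5) = 45% and clamp(50 + 5) = 55%:

average := api.ResourceThresholds{v1.ResourceCPU: 50}
span := api.ResourceThresholds{v1.ResourceCPU: 5}
low := normalizer.Clamp(normalizer.Sum(average, normalizer.Negate(span)), 0, 100) // cpu: 45
high := normalizer.Clamp(normalizer.Sum(average, span), 0, 100) // cpu: 55
_, _ = low, high
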
// referencedResourceListForNodesCapacity returns a ReferencedResourceList for
// the capacity of a list of nodes. If allocatable resources are present, they
// are used instead of capacity.
func referencedResourceListForNodesCapacity(nodes []*v1.Node) map[string]api.ReferencedResourceList {
capacities := map[string]api.ReferencedResourceList{}
for _, node := range nodes {
usage := usageClient.nodeUtilization(node.Name)
nodeCapacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
nodeCapacity = node.Status.Allocatable
capacities[node.Name] = referencedResourceListForNodeCapacity(node)
}
return capacities
}

// referencedResourceListForNodeCapacity returns a ReferencedResourceList for
// the capacity of a node. If allocatable resources are present, they are used
// instead of capacity.
func referencedResourceListForNodeCapacity(node *v1.Node) api.ReferencedResourceList {
capacity := node.Status.Capacity
if len(node.Status.Allocatable) > 0 {
capacity = node.Status.Allocatable
}

referenced := api.ReferencedResourceList{}
for name, quantity := range capacity {
referenced[name] = ptr.To(quantity)
}

// XXX the descheduler also manages monitoring queries that are
// supposed to return a value representing a percentage of the
// resource usage. In this case we need to provide a value for
// the MetricResource, which is not present in the node capacity.
referenced[MetricResource] = resource.NewQuantity(
100, resource.DecimalSI,
)

return referenced
}

// ResourceUsageToResourceThreshold is an implementation of a Normalizer that
// converts a set of resource usages and totals into percentage. This function
// operates on Quantity Value() for all the resources except CPU, where it uses
// MilliValue().
func ResourceUsageToResourceThreshold(
usages, totals api.ReferencedResourceList,
) api.ResourceThresholds {
result := api.ResourceThresholds{}
for rname, value := range usages {
if value == nil || totals[rname] == nil {
continue
}
for resource, value := range usage {
nodeCapacityValue := nodeCapacity[resource]
if resource == v1.ResourceCPU {
total[resource] += api.Percentage(value.MilliValue()) / api.Percentage(nodeCapacityValue.MilliValue()) * 100.0
} else {
total[resource] += api.Percentage(value.Value()) / api.Percentage(nodeCapacityValue.Value()) * 100.0

total := totals[rname]
used, capacity := value.Value(), total.Value()
if rname == v1.ResourceCPU {
used, capacity = value.MilliValue(), total.MilliValue()
}

var percent float64
if capacity > 0 {
percent = float64(used) / float64(capacity) * 100
}

result[rname] = api.Percentage(percent)
}
return result
}

// uniquifyResourceNames returns a slice of resource names with duplicates
// removed. the basic resources (cpu, memory, pods) are always included.
func uniquifyResourceNames(resourceNames []v1.ResourceName) []v1.ResourceName {
resourceNamesMap := map[v1.ResourceName]bool{
v1.ResourceCPU: true,
v1.ResourceMemory: true,
v1.ResourcePods: true,
}
for _, resourceName := range resourceNames {
resourceNamesMap[resourceName] = true
}
return slices.Collect(maps.Keys(resourceNamesMap))
}

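For example, the basic resources are always part of the result, regardless of the input:

names := uniquifyResourceNames([]v1.ResourceName{v1.ResourceCPU, "example.com/foo", "example.com/foo"})
// names holds cpu, memory, pods and example.com/foo exactly once each;
// ordering is unspecified since it comes from map iteration.
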
// filterResourceNames removes from the node usage map all keys
// that are not present in the resourceNames slice.
func filterResourceNames(
from map[string]api.ReferencedResourceList, resourceNames []v1.ResourceName,
) map[string]api.ReferencedResourceList {
newNodeUsage := make(map[string]api.ReferencedResourceList)
for nodeName, usage := range from {
newNodeUsage[nodeName] = api.ReferencedResourceList{}
for _, resourceName := range resourceNames {
if _, exists := usage[resourceName]; exists {
newNodeUsage[nodeName][resourceName] = usage[resourceName]
}
}
}
for resource, value := range total {
average[resource] = value / api.Percentage(numberOfNodes)
}
return average
return newNodeUsage
}

// capNodeCapacitiesToThreshold caps the node capacities to the given
// thresholds. if a threshold is not set for a resource, the full capacity is
// returned.
func capNodeCapacitiesToThreshold(
node *v1.Node,
thresholds api.ResourceThresholds,
resourceNames []v1.ResourceName,
) api.ReferencedResourceList {
capped := api.ReferencedResourceList{}
for _, resourceName := range resourceNames {
capped[resourceName] = capNodeCapacityToThreshold(
node, thresholds, resourceName,
)
}
return capped
}

// capNodeCapacityToThreshold caps the node capacity to the given threshold. if
// no threshold is set for the resource, the full capacity is returned.
func capNodeCapacityToThreshold(
node *v1.Node, thresholds api.ResourceThresholds, resourceName v1.ResourceName,
) *resource.Quantity {
capacities := referencedResourceListForNodeCapacity(node)
if _, ok := capacities[resourceName]; !ok {
// if the node knows nothing about the resource we return a
// zero capacity for it.
return resource.NewQuantity(0, resource.DecimalSI)
}

// if no threshold is set then we simply return the full capacity.
if _, ok := thresholds[resourceName]; !ok {
return capacities[resourceName]
}

// now that we have a capacity and a threshold we need to do the math
// to cap the former to the latter.
quantity := capacities[resourceName]
threshold := thresholds[resourceName]

// we have a different format for memory. all the other resources are
// in the DecimalSI format.
format := resource.DecimalSI
if resourceName == v1.ResourceMemory {
format = resource.BinarySI
}

// this is what we use to cap the capacity. thresholds are expected to
// be in the <0;100> interval.
fraction := func(threshold api.Percentage, capacity int64) int64 {
return int64(float64(threshold) * 0.01 * float64(capacity))
}

// here we also vary a little bit. milli is used for cpu, all the rest
// goes with the default.
if resourceName == v1.ResourceCPU {
return resource.NewMilliQuantity(
fraction(threshold, quantity.MilliValue()),
format,
)
}

return resource.NewQuantity(
fraction(threshold, quantity.Value()),
format,
)
}

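Concretely, with assumed values: for a node whose allocatable CPU is 4 cores, a 25% threshold caps the capacity at 25 * 0.01 * 4000m = 1000m:

// node is assumed to be a *v1.Node with 4 allocatable CPUs.
capped := capNodeCapacityToThreshold(node, api.ResourceThresholds{v1.ResourceCPU: 25}, v1.ResourceCPU)
// capped.MilliValue() == 1000, in DecimalSI format.
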
// assessAvailableResourceInNodes computes the available resources in all the
// nodes. this is done by summing up all the available resources in all the
// nodes and then subtracting the usage from it.
func assessAvailableResourceInNodes(
nodes []NodeInfo, resources []v1.ResourceName,
) (api.ReferencedResourceList, error) {
// available holds a sum of all the resources that can be used to move
// pods around. e.g. the sum of all available cpu and memory in all
// cluster nodes.
available := api.ReferencedResourceList{}
for _, node := range nodes {
for _, resourceName := range resources {
if _, exists := node.usage[resourceName]; !exists {
return nil, fmt.Errorf(
"unable to find %s resource in node's %s usage, terminating eviction",
resourceName, node.node.Name,
)
}

// XXX this should never happen. we better bail out
// here than hard crash with a segfault.
if node.usage[resourceName] == nil {
return nil, fmt.Errorf(
"unable to find %s usage resources, terminating eviction",
resourceName,
)
}

// keep the current usage around so we can subtract it
// from the available resources.
usage := *node.usage[resourceName]

// first time seeing this resource, initialize it.
if _, ok := available[resourceName]; !ok {
available[resourceName] = resource.NewQuantity(
0, resource.DecimalSI,
)
}

// XXX this should never happen. we better bail out
// here than hard crash with a segfault.
if node.available[resourceName] == nil {
return nil, fmt.Errorf(
"unable to find %s available resources, terminating eviction",
resourceName,
)
}

// now we add the capacity and then subtract the usage.
available[resourceName].Add(*node.available[resourceName])
available[resourceName].Sub(usage)
}
}

return available, nil
}

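As a worked example with assumed figures: two destination nodes whose capped capacities are 2000m CPU each, currently using 1500m and 1000m, yield available = (2000 - 1500) + (2000 - 1000) = 1500m for v1.ResourceCPU.

// sketch; destinationNodes is assumed to carry the numbers above.
available, err := assessAvailableResourceInNodes(destinationNodes, []v1.ResourceName{v1.ResourceCPU})
if err != nil {
// a node missing usage or availability data aborts the whole assessment.
klog.ErrorS(err, "unable to assess available resources")
}
_ = available
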
// withResourceRequestForAny returns a filter function that checks if a pod
// has a resource request specified for any of the given resource names.
func withResourceRequestForAny(names ...v1.ResourceName) pod.FilterFunc {
return func(pod *v1.Pod) bool {
all := append(pod.Spec.Containers, pod.Spec.InitContainers...)
for _, name := range names {
for _, container := range all {
if _, ok := container.Resources.Requests[name]; ok {
return true
}
}
}
return false
}
}

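As a usage sketch (the extended resource name is illustrative):

hasRequest := withResourceRequestForAny(v1.ResourceCPU, "example.com/foo")
// hasRequest(pod) is true if any container or init container of the pod
// declares a request for cpu or example.com/foo.
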
@@ -18,11 +18,16 @@ package nodeutilization

import (
"math"
"reflect"
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/classifier"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization/normalizer"
)

func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
@@ -55,44 +60,6 @@ var (
extendedResource = v1.ResourceName("example.com/foo")
)

func TestResourceUsagePercentages(t *testing.T) {
resourceUsagePercentage := resourceUsagePercentages(NodeUsage{
node: &v1.Node{
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
},
},
},
usage: map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
},
})

expectedUsageInIntPercentage := map[v1.ResourceName]float64{
v1.ResourceCPU: 63,
v1.ResourceMemory: 90,
v1.ResourcePods: 37,
}

for resourceName, percentage := range expectedUsageInIntPercentage {
if math.Floor(resourceUsagePercentage[resourceName]) != percentage {
t.Errorf("Incorrect percentage computation, expected %v, got math.Floor(%v) instead", percentage, resourceUsagePercentage[resourceName])
}
}

t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
}

func TestSortNodesByUsage(t *testing.T) {
tests := []struct {
name string
@@ -103,21 +70,21 @@ func TestSortNodesByUsage(t *testing.T) {
name: "cpu memory pods",
nodeInfoList: []NodeInfo{
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
@@ -130,17 +97,17 @@ func TestSortNodesByUsage(t *testing.T) {
name: "memory",
nodeInfoList: []NodeInfo{
*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
}
}),
*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
nodeInfo.usage = api.ReferencedResourceList{
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
}
}),
@@ -171,3 +138,442 @@ func TestSortNodesByUsage(t *testing.T) {
})
}
}

func TestResourceUsageToResourceThreshold(t *testing.T) {
for _, tt := range []struct {
name string
usage api.ReferencedResourceList
capacity api.ReferencedResourceList
expected api.ResourceThresholds
}{
{
name: "10 percent",
usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
expected: api.ResourceThresholds{v1.ResourceCPU: 10},
},
{
name: "zeroed out capacity",
usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
},
expected: api.ResourceThresholds{v1.ResourceCPU: 0},
},
{
name: "non existing usage",
usage: api.ReferencedResourceList{
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: resource.NewMilliQuantity(100, resource.DecimalSI),
},
expected: api.ResourceThresholds{},
},
{
name: "existing and non existing usage",
usage: api.ReferencedResourceList{
"does-not-exist": resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceCPU: resource.NewMilliQuantity(200, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
v1.ResourceMemory: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
expected: api.ResourceThresholds{v1.ResourceCPU: 20},
},
{
name: "nil usage",
usage: api.ReferencedResourceList{
v1.ResourceCPU: nil,
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
expected: api.ResourceThresholds{},
},
{
name: "nil capacity",
usage: api.ReferencedResourceList{
v1.ResourceCPU: resource.NewMilliQuantity(100, resource.DecimalSI),
},
capacity: api.ReferencedResourceList{
v1.ResourceCPU: nil,
},
expected: api.ResourceThresholds{},
},
} {
t.Run(tt.name, func(t *testing.T) {
result := ResourceUsageToResourceThreshold(tt.usage, tt.capacity)
if !reflect.DeepEqual(result, tt.expected) {
t.Errorf("Expected %v, got %v", tt.expected, result)
}
})
}
}

func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
result := api.ResourceThresholds{}
for rname, value := range usages {
total, ok := totals[rname]
if !ok {
continue
}

used, avail := value.Value(), total.Value()
if rname == v1.ResourceCPU {
used, avail = value.MilliValue(), total.MilliValue()
}

pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
result[rname] = api.Percentage(pct)
}
return result
}

// This is a test for thresholds being defined as deviations from the average
// usage. This is expected to be a little longer test case. We are going to
// comment the steps to make it easier to follow.
func TestClassificationUsingDeviationThresholds(t *testing.T) {
// These are the two thresholds defined by the user. These thresholds
// mean that our low limit will be 5 pct points below the average and
// the high limit will be 5 pct points above the average.
userDefinedThresholds := map[string]api.ResourceThresholds{
"low": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
}

// Create a fake total amount of resources for all nodes. We define
// the total amount to 1000 for both memory and cpu. This is so we
// can easily calculate (manually) the percentage of usages here.
nodesTotal := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1000"),
v1.ResourceMemory: resource.MustParse("1000"),
},
)

// Create a fake usage per node per resource. We are aiming to
// have the average of these resources in 50%. When applying the
// thresholds we should obtain the low threshold at 45% and the high
// threshold at 55%.
nodesUsage := map[string]v1.ResourceList{
"node1": {
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
},
"node2": {
v1.ResourceCPU: resource.MustParse("480"),
v1.ResourceMemory: resource.MustParse("480"),
},
"node3": {
v1.ResourceCPU: resource.MustParse("520"),
v1.ResourceMemory: resource.MustParse("520"),
},
"node4": {
v1.ResourceCPU: resource.MustParse("500"),
v1.ResourceMemory: resource.MustParse("500"),
},
"node5": {
v1.ResourceCPU: resource.MustParse("900"),
v1.ResourceMemory: resource.MustParse("900"),
},
}

// Normalize the usage to percentages and then calculate the average
// among all nodes.
usage := normalizer.Normalize(nodesUsage, nodesTotal, ResourceListUsageNormalizer)
average := normalizer.Average(usage)

// Create the thresholds by first applying the deviations and then
// replicating once for each node. Thresholds are supposed to be per
// node even though the user provides them only once. This is by
// design as it opens the possibility for further implementations of
// thresholds per node.
thresholds := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
[]api.ResourceThresholds{
normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
normalizer.Sum(average, userDefinedThresholds["high"]),
},
)

// Classify the nodes according to the thresholds. Nodes below the low
// threshold (45%) are underutilized, nodes above the high threshold
// (55%) are overutilized and nodes in between are properly utilized.
result := classifier.Classify(
usage, thresholds,
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
)

// we expect node1 to be underutilized (10%), node2, node3 and node4
// to be properly utilized (48%, 52% and 50% respectively) and node5 to
// be overutilized (90%).
expected := []map[string]api.ResourceThresholds{
{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
}

if !reflect.DeepEqual(result, expected) {
t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
}
}

// This is almost a copy of TestClassificationUsingDeviationThresholds but we
// are using pointers here. This is for making sure our generic types are in
// check. To understand this code better read the comments on
// TestClassificationUsingDeviationThresholds.
func TestUsingDeviationThresholdsWithPointers(t *testing.T) {
userDefinedThresholds := map[string]api.ResourceThresholds{
"low": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
"high": {v1.ResourceCPU: 5, v1.ResourceMemory: 5},
}

nodesTotal := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
map[v1.ResourceName]*resource.Quantity{
v1.ResourceCPU: ptr.To(resource.MustParse("1000")),
v1.ResourceMemory: ptr.To(resource.MustParse("1000")),
},
)

nodesUsage := map[string]map[v1.ResourceName]*resource.Quantity{
"node1": {
v1.ResourceCPU: ptr.To(resource.MustParse("100")),
v1.ResourceMemory: ptr.To(resource.MustParse("100")),
},
"node2": {
v1.ResourceCPU: ptr.To(resource.MustParse("480")),
v1.ResourceMemory: ptr.To(resource.MustParse("480")),
},
"node3": {
v1.ResourceCPU: ptr.To(resource.MustParse("520")),
v1.ResourceMemory: ptr.To(resource.MustParse("520")),
},
"node4": {
v1.ResourceCPU: ptr.To(resource.MustParse("500")),
v1.ResourceMemory: ptr.To(resource.MustParse("500")),
},
"node5": {
v1.ResourceCPU: ptr.To(resource.MustParse("900")),
v1.ResourceMemory: ptr.To(resource.MustParse("900")),
},
}

ptrNormalizer := func(
usages, totals map[v1.ResourceName]*resource.Quantity,
) api.ResourceThresholds {
newUsages := v1.ResourceList{}
for name, usage := range usages {
newUsages[name] = *usage
}
newTotals := v1.ResourceList{}
for name, total := range totals {
newTotals[name] = *total
}
return ResourceListUsageNormalizer(newUsages, newTotals)
}

usage := normalizer.Normalize(nodesUsage, nodesTotal, ptrNormalizer)
average := normalizer.Average(usage)

thresholds := normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
[]api.ResourceThresholds{
normalizer.Sum(average, normalizer.Negate(userDefinedThresholds["low"])),
normalizer.Sum(average, userDefinedThresholds["high"]),
},
)

result := classifier.Classify(
usage, thresholds,
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
)

expected := []map[string]api.ResourceThresholds{
{"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10}},
{"node5": {v1.ResourceCPU: 90, v1.ResourceMemory: 90}},
}

if !reflect.DeepEqual(result, expected) {
t.Fatalf("unexpected result: %v, expecting: %v", result, expected)
}
}

func TestNormalizeAndClassify(t *testing.T) {
for _, tt := range []struct {
name string
usage map[string]v1.ResourceList
totals map[string]v1.ResourceList
thresholds map[string][]api.ResourceThresholds
expected []map[string]api.ResourceThresholds
classifiers []classifier.Classifier[string, api.ResourceThresholds]
}{
{
name: "happy path test",
usage: map[string]v1.ResourceList{
"node1": {
// underutilized on cpu and memory.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10"),
},
"node2": {
// overutilized on cpu and memory.
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
"node3": {
// properly utilized on cpu and memory.
v1.ResourceCPU: resource.MustParse("50"),
v1.ResourceMemory: resource.MustParse("50"),
},
"node4": {
// underutilized on cpu and overutilized on memory.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("90"),
},
},
totals: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
},
),
thresholds: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4"},
[]api.ResourceThresholds{
{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
},
),
expected: []map[string]api.ResourceThresholds{
{
"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
},
{
"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
},
},
classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
},
},
{
name: "three thresholds",
usage: map[string]v1.ResourceList{
"node1": {
// match for the first classifier.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("10"),
},
"node2": {
// match for the third classifier.
v1.ResourceCPU: resource.MustParse("90"),
v1.ResourceMemory: resource.MustParse("90"),
},
"node3": {
// match for the second classifier.
v1.ResourceCPU: resource.MustParse("40"),
v1.ResourceMemory: resource.MustParse("40"),
},
"node4": {
// matches no classifier.
v1.ResourceCPU: resource.MustParse("10"),
v1.ResourceMemory: resource.MustParse("90"),
},
"node5": {
// match for the first classifier.
v1.ResourceCPU: resource.MustParse("11"),
v1.ResourceMemory: resource.MustParse("18"),
},
},
totals: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
v1.ResourceMemory: resource.MustParse("100"),
},
),
thresholds: normalizer.Replicate(
[]string{"node1", "node2", "node3", "node4", "node5"},
[]api.ResourceThresholds{
{v1.ResourceCPU: 20, v1.ResourceMemory: 20},
{v1.ResourceCPU: 50, v1.ResourceMemory: 50},
{v1.ResourceCPU: 80, v1.ResourceMemory: 80},
},
),
expected: []map[string]api.ResourceThresholds{
{
"node1": {v1.ResourceCPU: 10, v1.ResourceMemory: 10},
"node5": {v1.ResourceCPU: 11, v1.ResourceMemory: 18},
},
{
"node3": {v1.ResourceCPU: 40, v1.ResourceMemory: 40},
},
{
"node2": {v1.ResourceCPU: 90, v1.ResourceMemory: 90},
},
},
classifiers: []classifier.Classifier[string, api.ResourceThresholds]{
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(usage - limit)
},
),
classifier.ForMap[string, v1.ResourceName, api.Percentage, api.ResourceThresholds](
func(usage, limit api.Percentage) int {
return int(limit - usage)
},
),
},
},
} {
t.Run(tt.name, func(t *testing.T) {
pct := normalizer.Normalize(tt.usage, tt.totals, ResourceListUsageNormalizer)
res := classifier.Classify(pct, tt.thresholds, tt.classifiers...)
if !reflect.DeepEqual(res, tt.expected) {
t.Fatalf("unexpected result: %v, expecting: %v", res, tt.expected)
}
})
}
}

pkg/framework/plugins/nodeutilization/normalizer/normalizer.go (new file, 142 lines)
@@ -0,0 +1,142 @@
/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package normalizer

import (
"math"

"golang.org/x/exp/constraints"
)

// Normalizer is a function that receives two values of the same type and
// returns an object of a different type. A use case can be a function
// that converts a memory usage from mb to % (the first argument would be
// the memory usage in mb and the second argument would be the total memory
// available in mb).
type Normalizer[V, N any] func(V, V) N

// Values is a map of values indexed by a comparable key. An example of this
// can be a list of resources indexed by a node name.
type Values[K comparable, V any] map[K]V

// Number is an interface that represents a number. Represents things we
// can do math operations on.
type Number interface {
constraints.Integer | constraints.Float
}

// Normalize uses a Normalizer function to normalize a set of values. For
|
||||
// example one may want to convert a set of memory usages from mb to %.
|
||||
// This function receives a set of usages, a set of totals, and a Normalizer
|
||||
// function. The function will return a map with the normalized values.
|
||||
func Normalize[K comparable, V, N any](usages, totals Values[K, V], fn Normalizer[V, N]) map[K]N {
|
||||
result := Values[K, N]{}
|
||||
for key, value := range usages {
|
||||
total, ok := totals[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
result[key] = fn(value, total)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Replicate replicates the provide value for each key in the provided slice.
|
||||
// Returns a map with the keys and the provided value.
|
||||
func Replicate[K comparable, V any](keys []K, value V) map[K]V {
|
||||
result := map[K]V{}
|
||||
for _, key := range keys {
|
||||
result[key] = value
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Clamp imposes minimum and maximum limits on a set of values. The function
|
||||
// will return a set of values where each value is between the minimum and
|
||||
// maximum values (included). Values below minimum are rounded up to the
|
||||
// minimum value, and values above maximum are rounded down to the maximum
|
||||
// value.
|
||||
func Clamp[K comparable, N Number, V ~map[K]N](values V, minimum, maximum N) V {
|
||||
result := V{}
|
||||
for key := range values {
|
||||
value := values[key]
|
||||
value = N(math.Max(float64(value), float64(minimum)))
|
||||
value = N(math.Min(float64(value), float64(maximum)))
|
||||
result[key] = value
|
||||
}
|
||||
return result
|
||||
}

// Map applies a function to each map in a slice of maps. Returns a new
// slice with the results of applying the function to each element.
func Map[K comparable, N Number, V ~map[K]N](items []V, fn func(V) V) []V {
	result := []V{}
	for _, item := range items {
		result = append(result, fn(item))
	}
	return result
}

// Negate converts the values of a map to their negated values.
func Negate[K comparable, N Number, V ~map[K]N](values V) V {
	result := V{}
	for key, value := range values {
		result[key] = -value
	}
	return result
}

// Round rounds the values of a map to the nearest integer. Calls math.Round
// on each value of the map.
func Round[K comparable, N Number, V ~map[K]N](values V) V {
	result := V{}
	for key, value := range values {
		result[key] = N(math.Round(float64(value)))
	}
	return result
}

// Sum sums up the values of two maps. Values are expected to be of Number
// type. Original values are preserved. Keys present only in mapB are
// ignored; keys present only in mapA are summed with mapB's zero value.
func Sum[K comparable, N Number, V ~map[K]N](mapA, mapB V) V {
	result := V{}
	for name, value := range mapA {
		result[name] = value + mapB[name]
	}
	return result
}
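
// Example (illustrative values only): shifting a set of thresholds by a
// deviation, as TestSum below does.
//
//	Sum(api.ResourceThresholds{v1.ResourceCPU: 50},
//		api.ResourceThresholds{v1.ResourceCPU: -2})
//	// => api.ResourceThresholds{v1.ResourceCPU: 48}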

// Average calculates the average of a set of values. This function receives
// a map of value maps and returns the per-key average across all the inner
// maps. Average expects the values to represent the same unit of measure;
// you can use this function after normalizing the values.
func Average[J, K comparable, N Number, V ~map[J]N](values map[K]V) V {
	counter := map[J]int{}
	result := V{}
	for _, imap := range values {
		for name, value := range imap {
			result[name] += value
			counter[name]++
		}
	}

	for name := range result {
		result[name] /= N(counter[name])
	}

	return result
}
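
// Example (illustrative node names and values): averaging normalized
// per-node usages into a single cluster-wide profile.
//
//	perNode := map[string]map[string]float64{
//		"node1": {"cpu": 40, "memory": 60},
//		"node2": {"cpu": 60, "memory": 80},
//	}
//	avg := Average(perNode) // => map[cpu:50 memory:70]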
|
||||
@@ -0,0 +1,649 @@
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package normalizer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func ResourceListUsageNormalizer(usages, totals v1.ResourceList) api.ResourceThresholds {
	result := api.ResourceThresholds{}
	for rname, value := range usages {
		total, ok := totals[rname]
		if !ok {
			continue
		}

		used, avail := value.Value(), total.Value()
		if rname == v1.ResourceCPU {
			used, avail = value.MilliValue(), total.MilliValue()
		}

		pct := math.Max(math.Min(float64(used)/float64(avail)*100, 100), 0)
		result[rname] = api.Percentage(pct)
	}
	return result
}
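
// Quick sketch (hypothetical node sizes): 1 CPU used out of 2 allocatable
// normalizes to 50; usage above the total is capped at 100.
//
//	usage := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}
//	total := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}
//	_ = ResourceListUsageNormalizer(usage, total) // => {cpu: 50}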
|
||||
|
||||
func TestNormalizeSimple(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usages map[string]float64
|
||||
totals map[string]float64
|
||||
expected map[string]float64
|
||||
normalizer Normalizer[float64, float64]
|
||||
}{
|
||||
{
|
||||
name: "single normalization",
|
||||
usages: map[string]float64{"cpu": 1},
|
||||
totals: map[string]float64{"cpu": 2},
|
||||
expected: map[string]float64{"cpu": 0.5},
|
||||
normalizer: func(usage, total float64) float64 {
|
||||
return usage / total
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple normalizations",
|
||||
usages: map[string]float64{
|
||||
"cpu": 1,
|
||||
"mem": 6,
|
||||
},
|
||||
totals: map[string]float64{
|
||||
"cpu": 2,
|
||||
"mem": 10,
|
||||
},
|
||||
expected: map[string]float64{
|
||||
"cpu": 0.5,
|
||||
"mem": 0.6,
|
||||
},
|
||||
normalizer: func(usage, total float64) float64 {
|
||||
return usage / total
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "missing totals for a key",
|
||||
usages: map[string]float64{
|
||||
"cpu": 1,
|
||||
"mem": 6,
|
||||
},
|
||||
totals: map[string]float64{
|
||||
"cpu": 2,
|
||||
},
|
||||
expected: map[string]float64{
|
||||
"cpu": 0.5,
|
||||
},
|
||||
normalizer: func(usage, total float64) float64 {
|
||||
return usage / total
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Normalize(tt.usages, tt.totals, tt.normalizer)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalize(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usages map[string]v1.ResourceList
|
||||
totals map[string]v1.ResourceList
|
||||
expected map[string]api.ResourceThresholds
|
||||
normalizer Normalizer[v1.ResourceList, api.ResourceThresholds]
|
||||
}{
|
||||
{
|
||||
name: "single normalization",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("1")},
|
||||
},
|
||||
totals: map[string]v1.ResourceList{
|
||||
"node1": {v1.ResourceCPU: resource.MustParse("2")},
|
||||
},
|
||||
expected: map[string]api.ResourceThresholds{
|
||||
"node1": {v1.ResourceCPU: 50},
|
||||
},
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
{
|
||||
name: "multiple normalization",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
v1.ResourcePods: resource.MustParse("2"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("20"),
|
||||
v1.ResourcePods: resource.MustParse("30"),
|
||||
},
|
||||
},
|
||||
totals: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
v1.ResourcePods: resource.MustParse("100"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("100"),
|
||||
},
|
||||
},
|
||||
expected: map[string]api.ResourceThresholds{
|
||||
"node1": {
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 100,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: 10,
|
||||
v1.ResourceMemory: 20,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
},
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
{
|
||||
name: "multiple normalization with over 100% usage",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("120"),
|
||||
v1.ResourceMemory: resource.MustParse("130"),
|
||||
v1.ResourcePods: resource.MustParse("140"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("150"),
|
||||
v1.ResourceMemory: resource.MustParse("160"),
|
||||
v1.ResourcePods: resource.MustParse("170"),
|
||||
},
|
||||
},
|
||||
totals: Replicate(
|
||||
[]string{"node1", "node2"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("100"),
|
||||
},
|
||||
),
|
||||
expected: Replicate(
|
||||
[]string{"node1", "node2"},
|
||||
api.ResourceThresholds{
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 100,
|
||||
v1.ResourcePods: 100,
|
||||
},
|
||||
),
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
{
|
||||
name: "multiple normalization with over 100% usage and different totals",
|
||||
usages: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("2Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("99"),
|
||||
v1.ResourceMemory: resource.MustParse("99Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("8"),
|
||||
v1.ResourceMemory: resource.MustParse("8Gi"),
|
||||
},
|
||||
},
|
||||
totals: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100Gi"),
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("4Gi"),
|
||||
},
|
||||
},
|
||||
expected: map[string]api.ResourceThresholds{
|
||||
"node1": {
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: 99,
|
||||
v1.ResourceMemory: 99,
|
||||
},
|
||||
"node3": {
|
||||
v1.ResourceCPU: 100,
|
||||
v1.ResourceMemory: 100,
|
||||
},
|
||||
},
|
||||
normalizer: ResourceListUsageNormalizer,
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := Normalize(tt.usages, tt.totals, tt.normalizer)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAverage(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
usage map[string]v1.ResourceList
|
||||
limits map[string]v1.ResourceList
|
||||
expected api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "empty usage",
|
||||
usage: map[string]v1.ResourceList{},
|
||||
limits: map[string]v1.ResourceList{},
|
||||
expected: api.ResourceThresholds{},
|
||||
},
|
||||
{
|
||||
name: "fifty percent usage",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("1"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("6"),
|
||||
},
|
||||
},
|
||||
limits: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("2"),
|
||||
v1.ResourceMemory: resource.MustParse("12"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("4"),
|
||||
v1.ResourceMemory: resource.MustParse("12"),
|
||||
},
|
||||
},
|
||||
expected: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed percent usage",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("80"),
|
||||
v1.ResourcePods: resource.MustParse("20"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("20"),
|
||||
v1.ResourceMemory: resource.MustParse("60"),
|
||||
v1.ResourcePods: resource.MustParse("20"),
|
||||
},
|
||||
},
|
||||
limits: Replicate(
|
||||
[]string{"node1", "node2"},
|
||||
v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("100"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("10000"),
|
||||
},
|
||||
),
|
||||
expected: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 70,
|
||||
v1.ResourcePods: 0.2,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mixed limits",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("30"),
|
||||
v1.ResourcePods: resource.MustParse("200"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("72"),
|
||||
v1.ResourcePods: resource.MustParse("200"),
|
||||
},
|
||||
},
|
||||
limits: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
v1.ResourceCPU: resource.MustParse("10"),
|
||||
v1.ResourceMemory: resource.MustParse("100"),
|
||||
v1.ResourcePods: resource.MustParse("1000"),
|
||||
},
|
||||
"node2": {
|
||||
v1.ResourceCPU: resource.MustParse("1000"),
|
||||
v1.ResourceMemory: resource.MustParse("180"),
|
||||
v1.ResourcePods: resource.MustParse("10"),
|
||||
},
|
||||
},
|
||||
expected: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50.5,
|
||||
v1.ResourceMemory: 35,
|
||||
v1.ResourcePods: 60,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some nodes missing some resources",
|
||||
usage: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
"usage-exists-in-two": resource.MustParse("20"),
|
||||
},
|
||||
"node2": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
"usage-exists-in-two": resource.MustParse("20"),
|
||||
},
|
||||
"node3": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
},
|
||||
"node4": {
|
||||
"limit-exists-in-all": resource.MustParse("10"),
|
||||
"limit-exists-in-two": resource.MustParse("11"),
|
||||
"limit-does-not-exist": resource.MustParse("12"),
|
||||
"usage-exists-in-all": resource.MustParse("13"),
|
||||
},
|
||||
"node5": {
|
||||
"random-usage-without-limit": resource.MustParse("10"),
|
||||
},
|
||||
},
|
||||
limits: map[string]v1.ResourceList{
|
||||
"node1": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"limit-exists-in-two": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node2": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"limit-exists-in-two": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node3": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node4": {
|
||||
"limit-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-all": resource.MustParse("100"),
|
||||
"usage-exists-in-two": resource.MustParse("100"),
|
||||
"usage-does-not-exist": resource.MustParse("100"),
|
||||
},
|
||||
"node5": {
|
||||
"random-limit-without-usage": resource.MustParse("100"),
|
||||
},
|
||||
},
|
||||
expected: api.ResourceThresholds{
|
||||
"limit-exists-in-all": 10,
|
||||
"limit-exists-in-two": 11,
|
||||
"usage-exists-in-all": 13,
|
||||
"usage-exists-in-two": 20,
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
average := Average(
|
||||
Normalize(
|
||||
tt.usage, tt.limits, ResourceListUsageNormalizer,
|
||||
),
|
||||
)
|
||||
if !reflect.DeepEqual(average, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v, expected: %v", average, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSum(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
data api.ResourceThresholds
|
||||
deviations []api.ResourceThresholds
|
||||
expected []api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "single deviation",
|
||||
data: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
deviations: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 1,
|
||||
v1.ResourceMemory: 1,
|
||||
v1.ResourcePods: 1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 2,
|
||||
v1.ResourceMemory: 2,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 3,
|
||||
v1.ResourceMemory: 3,
|
||||
v1.ResourcePods: 3,
|
||||
},
|
||||
},
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 51,
|
||||
v1.ResourceMemory: 51,
|
||||
v1.ResourcePods: 51,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 52,
|
||||
v1.ResourceMemory: 52,
|
||||
v1.ResourcePods: 52,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 53,
|
||||
v1.ResourceMemory: 53,
|
||||
v1.ResourcePods: 53,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deviate with negative values",
|
||||
data: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
deviations: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: -2,
|
||||
v1.ResourceMemory: -2,
|
||||
v1.ResourcePods: -2,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: -1,
|
||||
v1.ResourceMemory: -1,
|
||||
v1.ResourcePods: -1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 0,
|
||||
v1.ResourceMemory: 0,
|
||||
v1.ResourcePods: 0,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 1,
|
||||
v1.ResourceMemory: 1,
|
||||
v1.ResourcePods: 1,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 2,
|
||||
v1.ResourceMemory: 2,
|
||||
v1.ResourcePods: 2,
|
||||
},
|
||||
},
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 48,
|
||||
v1.ResourceMemory: 48,
|
||||
v1.ResourcePods: 48,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 49,
|
||||
v1.ResourceMemory: 49,
|
||||
v1.ResourcePods: 49,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 51,
|
||||
v1.ResourceMemory: 51,
|
||||
v1.ResourcePods: 51,
|
||||
},
|
||||
{
|
||||
v1.ResourceCPU: 52,
|
||||
v1.ResourceMemory: 52,
|
||||
v1.ResourcePods: 52,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := []api.ResourceThresholds{}
|
||||
for _, deviation := range tt.deviations {
|
||||
partial := Sum(tt.data, deviation)
|
||||
result = append(result, partial)
|
||||
}
|
||||
|
||||
if len(result) != len(tt.deviations) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
fmt.Printf("%T, %T\n", result, tt.expected)
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClamp(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
data []api.ResourceThresholds
|
||||
minimum api.Percentage
|
||||
maximum api.Percentage
|
||||
expected []api.ResourceThresholds
|
||||
}{
|
||||
{
|
||||
name: "all over the limit",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourceMemory: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some over some below the limits",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 7,
|
||||
v1.ResourceMemory: 8,
|
||||
v1.ResourcePods: 88,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 10,
|
||||
v1.ResourceMemory: 10,
|
||||
v1.ResourcePods: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all within the limits",
|
||||
data: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 15,
|
||||
v1.ResourcePods: 15,
|
||||
},
|
||||
},
|
||||
minimum: 10,
|
||||
maximum: 20,
|
||||
expected: []api.ResourceThresholds{
|
||||
{
|
||||
v1.ResourceCPU: 15,
|
||||
v1.ResourceMemory: 15,
|
||||
v1.ResourcePods: 15,
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fn := func(thresholds api.ResourceThresholds) api.ResourceThresholds {
|
||||
return Clamp(thresholds, tt.minimum, tt.maximum)
|
||||
}
|
||||
result := Map(tt.data, fn)
|
||||
if !reflect.DeepEqual(result, tt.expected) {
|
||||
t.Fatalf("unexpected result: %v", result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,18 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
// EvictionMode describes a mode of eviction. See the list below for the
// available modes.
type EvictionMode string

const (
	// EvictionModeOnlyThresholdingResources makes the descheduler evict
	// only pods that have a resource request defined for any of the user
	// provided thresholds. If the pod does not request the resource, it
	// will not be evicted.
	EvictionModeOnlyThresholdingResources EvictionMode = "OnlyThresholdingResources"
)
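
// Illustrative sketch (not part of this change): enabling the mode when
// building the plugin arguments programmatically.
//
//	args := &HighNodeUtilizationArgs{
//		Thresholds:    api.ResourceThresholds{v1.ResourceCPU: 20},
//		EvictionModes: []EvictionMode{EvictionModeOnlyThresholdingResources},
//	}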
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
@@ -28,12 +40,15 @@ type LowNodeUtilizationArgs struct {
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
TargetThresholds api.ResourceThresholds `json:"targetThresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
MetricsUtilization *MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
// but then filtered out before eviction
|
||||
EvictableNamespaces *api.Namespaces `json:"evictableNamespaces,omitempty"`
|
||||
|
||||
// evictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
|
||||
EvictionLimits *api.EvictionLimits `json:"evictionLimits,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=true
|
||||
@@ -42,9 +57,15 @@ type LowNodeUtilizationArgs struct {
|
||||
type HighNodeUtilizationArgs struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
MetricsUtilization MetricsUtilization `json:"metricsUtilization,omitempty"`
|
||||
Thresholds api.ResourceThresholds `json:"thresholds"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
|
||||
	// EvictionModes is a set of modes to be taken into account when the
	// descheduler evicts pods. For example the mode
	// `OnlyThresholdingResources` can be used to make sure the descheduler
	// only evicts pods that have resource requests for the defined
	// thresholds.
	EvictionModes []EvictionMode `json:"evictionModes,omitempty"`
|
||||
|
||||
// Naming this one differently since namespaces are still
|
||||
// considered while considering resources used by pods
|
||||
@@ -53,8 +74,24 @@ type HighNodeUtilizationArgs struct {
|
||||
}
|
||||
|
||||
// MetricsUtilization allows the plugin to consume actual resource
// utilization from metrics.
// +k8s:deepcopy-gen=true
type MetricsUtilization struct {
	// metricsServer enables metrics from a kubernetes metrics server.
	// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
	// Deprecated. Use Source instead.
	MetricsServer bool `json:"metricsServer,omitempty"`
|
||||
|
||||
	// source enables the plugin to consume metrics from a metrics source.
	// Currently only KubernetesMetrics is available.
	Source api.MetricsSource `json:"source,omitempty"`
|
||||
|
||||
// prometheus enables metrics collection through a prometheus query.
|
||||
Prometheus *Prometheus `json:"prometheus,omitempty"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
	// query returning a vector of samples, each labeled with `instance`
	// corresponding to a node name, and with each sample value a real
	// number in the <0; 1> interval.
	Query string `json:"query,omitempty"`
}
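
// Illustrative sketch (the query string is taken from the tests below):
// configuring the Prometheus source for LowNodeUtilization in code.
//
//	mu := &MetricsUtilization{
//		Source:     api.PrometheusMetrics,
//		Prometheus: &Prometheus{Query: "instance:node_cpu:rate:sum"},
//	}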
|
||||
|
||||
@@ -19,27 +19,54 @@ package nodeutilization
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
type UsageClientType int
|
||||
|
||||
const (
|
||||
requestedUsageClientType UsageClientType = iota
|
||||
actualUsageClientType
|
||||
prometheusUsageClientType
|
||||
)
|
||||
|
||||
type notSupportedError struct {
	usageClientType UsageClientType
}

func (e notSupportedError) Error() string {
	return fmt.Sprintf("operation not supported by usage client type %d", e.usageClientType)
}
|
||||
|
||||
func newNotSupportedError(usageClientType UsageClientType) *notSupportedError {
|
||||
return ¬SupportedError{
|
||||
usageClientType: usageClientType,
|
||||
}
|
||||
}
|
||||
|
||||
type usageClient interface {
|
||||
	// Both low/high node utilization plugins are expected to invoke sync right
	// after the Balance method is invoked. There is no cache invalidation, so
	// each Balance is expected to get the latest data by invoking sync first.
|
||||
sync(nodes []*v1.Node) error
|
||||
nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity
|
||||
sync(ctx context.Context, nodes []*v1.Node) error
|
||||
nodeUtilization(node string) api.ReferencedResourceList
|
||||
pods(node string) []*v1.Pod
|
||||
podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error)
|
||||
podUsage(pod *v1.Pod) (api.ReferencedResourceList, error)
|
||||
}
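
// Typical call pattern implied by the comment above (sketch only):
//
//	if err := client.sync(ctx, nodes); err != nil {
//		return err
//	}
//	usage := client.nodeUtilization(nodes[0].Name)
//	pods := client.pods(nodes[0].Name)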
|
||||
|
||||
type requestedUsageClient struct {
|
||||
@@ -47,7 +74,7 @@ type requestedUsageClient struct {
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
_nodeUtilization map[string]api.ReferencedResourceList
|
||||
}
|
||||
|
||||
var _ usageClient = &requestedUsageClient{}
|
||||
@@ -62,7 +89,7 @@ func newRequestedUsageClient(
|
||||
}
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
func (s *requestedUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
|
||||
return s._nodeUtilization[node]
|
||||
}
|
||||
|
||||
@@ -70,16 +97,16 @@ func (s *requestedUsageClient) pods(node string) []*v1.Pod {
|
||||
return s._pods[node]
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
usage := make(map[v1.ResourceName]*resource.Quantity)
|
||||
func (s *requestedUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
|
||||
usage := make(api.ReferencedResourceList)
|
||||
for _, resourceName := range s.resourceNames {
|
||||
usage[resourceName] = utilptr.To[resource.Quantity](utils.GetResourceRequestQuantity(pod, resourceName).DeepCopy())
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
func (s *requestedUsageClient) sync(nodes []*v1.Node) error {
|
||||
s._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
func (s *requestedUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
s._nodeUtilization = make(map[string]api.ReferencedResourceList)
|
||||
s._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
for _, node := range nodes {
|
||||
@@ -111,7 +138,7 @@ type actualUsageClient struct {
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
_nodeUtilization map[string]api.ReferencedResourceList
|
||||
}
|
||||
|
||||
var _ usageClient = &actualUsageClient{}
|
||||
@@ -128,7 +155,7 @@ func newActualUsageClient(
|
||||
}
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
func (client *actualUsageClient) nodeUtilization(node string) api.ReferencedResourceList {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
@@ -136,7 +163,7 @@ func (client *actualUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
func (client *actualUsageClient) podUsage(pod *v1.Pod) (api.ReferencedResourceList, error) {
|
||||
	// It's not efficient to keep track of all pods in a cluster when only a fraction of them is evicted.
	// Thus, take the current pod metrics without computing any smoothing (e.g. an EWMA).
|
||||
podMetrics, err := client.metricsCollector.MetricsClient().MetricsV1beta1().PodMetricses(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
@@ -144,7 +171,7 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
|
||||
return nil, fmt.Errorf("unable to get podmetrics for %q/%q: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
|
||||
totalUsage := make(map[v1.ResourceName]*resource.Quantity)
|
||||
totalUsage := make(api.ReferencedResourceList)
|
||||
for _, container := range podMetrics.Containers {
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if resourceName == v1.ResourcePods {
|
||||
@@ -164,8 +191,8 @@ func (client *actualUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*res
|
||||
return totalUsage, nil
|
||||
}
|
||||
|
||||
func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
func (client *actualUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]api.ReferencedResourceList)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodesUsage, err := client.metricsCollector.AllNodesUsage()
|
||||
@@ -180,18 +207,19 @@ func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
nodeUsage, ok := nodesUsage[node.Name]
|
||||
collectedNodeUsage, ok := nodesUsage[node.Name]
|
||||
if !ok {
|
||||
return fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
nodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
collectedNodeUsage[v1.ResourcePods] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
|
||||
nodeUsage := api.ReferencedResourceList{}
|
||||
for _, resourceName := range client.resourceNames {
|
||||
if _, exists := nodeUsage[resourceName]; !exists {
|
||||
if _, exists := collectedNodeUsage[resourceName]; !exists {
|
||||
return fmt.Errorf("unable to find %q resource for collected %q node metric", resourceName, node.Name)
|
||||
}
|
||||
nodeUsage[resourceName] = collectedNodeUsage[resourceName]
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsage
|
||||
@@ -199,3 +227,95 @@ func (client *actualUsageClient) sync(nodes []*v1.Node) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type prometheusUsageClient struct {
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
promClient promapi.Client
|
||||
promQuery string
|
||||
|
||||
_pods map[string][]*v1.Pod
|
||||
_nodeUtilization map[string]map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
var _ usageClient = &prometheusUsageClient{}
|
||||
|
||||
func newPrometheusUsageClient(
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
promClient promapi.Client,
|
||||
promQuery string,
|
||||
) *prometheusUsageClient {
|
||||
return &prometheusUsageClient{
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
promClient: promClient,
|
||||
promQuery: promQuery,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) nodeUtilization(node string) map[v1.ResourceName]*resource.Quantity {
|
||||
return client._nodeUtilization[node]
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) pods(node string) []*v1.Pod {
|
||||
return client._pods[node]
|
||||
}
|
||||
|
||||
func (client *prometheusUsageClient) podUsage(pod *v1.Pod) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
return nil, newNotSupportedError(prometheusUsageClientType)
|
||||
}
|
||||
|
||||
func NodeUsageFromPrometheusMetrics(ctx context.Context, promClient promapi.Client, promQuery string) (map[string]map[v1.ResourceName]*resource.Quantity, error) {
|
||||
results, warnings, err := promv1.NewAPI(promClient).Query(ctx, promQuery, time.Now())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to capture prometheus metrics: %v", err)
|
||||
}
|
||||
if len(warnings) > 0 {
|
||||
klog.Infof("prometheus metrics warnings: %v", warnings)
|
||||
}
|
||||
|
||||
if results.Type() != model.ValVector {
|
||||
return nil, fmt.Errorf("expected query results to be of type %q, got %q instead", model.ValVector, results.Type())
|
||||
}
|
||||
|
||||
nodeUsages := make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
for _, sample := range results.(model.Vector) {
|
||||
nodeName, exists := sample.Metric["instance"]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("The collected metrics sample is missing 'instance' key")
|
||||
}
|
||||
if sample.Value < 0 || sample.Value > 1 {
|
||||
return nil, fmt.Errorf("The collected metrics sample for %q has value %v outside of <0; 1> interval", string(nodeName), sample.Value)
|
||||
}
|
||||
nodeUsages[string(nodeName)] = map[v1.ResourceName]*resource.Quantity{
|
||||
MetricResource: resource.NewQuantity(int64(sample.Value*100), resource.DecimalSI),
|
||||
}
|
||||
}
|
||||
|
||||
return nodeUsages, nil
|
||||
}
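
// Illustrative only (node name and query from the tests below): the
// returned quantities are percentages scaled from the <0; 1> samples, so a
// 0.42 sample becomes a quantity of 42 under the MetricResource key.
//
//	usages, err := NodeUsageFromPrometheusMetrics(ctx, promClient, "instance:node_cpu:rate:sum")
//	if err == nil {
//		pct := usages["ip-10-0-17-165.ec2.internal"][MetricResource].Value() // 42
//	}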
|
||||
|
||||
func (client *prometheusUsageClient) sync(ctx context.Context, nodes []*v1.Node) error {
|
||||
client._nodeUtilization = make(map[string]map[v1.ResourceName]*resource.Quantity)
|
||||
client._pods = make(map[string][]*v1.Pod)
|
||||
|
||||
nodeUsages, err := NodeUsageFromPrometheusMetrics(ctx, client.promClient, client.promQuery)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
if _, exists := nodeUsages[node.Name]; !exists {
|
||||
return fmt.Errorf("unable to find metric entry for %v", node.Name)
|
||||
}
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, client.getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
|
||||
return fmt.Errorf("error accessing %q node's pods: %v", node.Name, err)
|
||||
}
|
||||
|
||||
// store the snapshot of pods from the same (or the closest) node utilization computation
|
||||
client._pods[node.Name] = pods
|
||||
client._nodeUtilization[node.Name] = nodeUsages[node.Name]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -18,9 +18,14 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -58,9 +63,9 @@ func updateMetricsAndCheckNodeUtilization(
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture metrics: %v", err)
|
||||
}
|
||||
err = usageClient.sync(nodes)
|
||||
err = usageClient.sync(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to capture a snapshot: %v", err)
|
||||
t.Fatalf("failed to sync a snapshot: %v", err)
|
||||
}
|
||||
nodeUtilization := usageClient.nodeUtilization(nodeName)
|
||||
t.Logf("current node cpu usage: %v\n", nodeUtilization[v1.ResourceCPU].MilliValue())
|
||||
@@ -137,3 +142,158 @@ func TestActualUsageClient(t *testing.T) {
|
||||
metricsClientset, collector, usageClient, nodes, n2.Name, n2metrics,
|
||||
)
|
||||
}
|
||||
|
||||
type fakePromClient struct {
|
||||
result interface{}
|
||||
dataType model.ValueType
|
||||
}
|
||||
|
||||
type fakePayload struct {
|
||||
Status string `json:"status"`
|
||||
Data queryResult `json:"data"`
|
||||
}
|
||||
|
||||
type queryResult struct {
|
||||
Type model.ValueType `json:"resultType"`
|
||||
Result interface{} `json:"result"`
|
||||
}
|
||||
|
||||
func (client *fakePromClient) URL(ep string, args map[string]string) *url.URL {
|
||||
return &url.URL{}
|
||||
}
|
||||
|
||||
func (client *fakePromClient) Do(ctx context.Context, request *http.Request) (*http.Response, []byte, error) {
|
||||
jsonData, err := json.Marshal(fakePayload{
|
||||
Status: "success",
|
||||
Data: queryResult{
|
||||
Type: client.dataType,
|
||||
Result: client.result,
|
||||
},
|
||||
})
|
||||
|
||||
return &http.Response{StatusCode: 200}, jsonData, err
|
||||
}
|
||||
|
||||
func sample(metricName, nodeName string, value float64) *model.Sample {
|
||||
return &model.Sample{
|
||||
Metric: model.Metric{
|
||||
"__name__": model.LabelValue(metricName),
|
||||
"instance": model.LabelValue(nodeName),
|
||||
},
|
||||
Value: model.SampleValue(value),
|
||||
Timestamp: 1728991761711,
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusUsageClient(t *testing.T) {
|
||||
n1 := test.BuildTestNode("ip-10-0-17-165.ec2.internal", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("ip-10-0-51-101.ec2.internal", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("ip-10-0-94-25.ec2.internal", 2000, 3000, 10, nil)
|
||||
|
||||
nodes := []*v1.Node{n1, n2, n3}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 400, 0, n1.Name, nil)
|
||||
p21 := test.BuildTestPod("p21", 400, 0, n2.Name, nil)
|
||||
p22 := test.BuildTestPod("p22", 400, 0, n2.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 400, 0, n3.Name, nil)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
result interface{}
|
||||
dataType model.ValueType
|
||||
nodeUsage map[string]int64
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "valid data",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 0.20381818181818104),
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-17-165.ec2.internal", 0.4245454545454522),
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-94-25.ec2.internal", 0.5695757575757561),
|
||||
},
|
||||
nodeUsage: map[string]int64{
|
||||
"ip-10-0-51-101.ec2.internal": 20,
|
||||
"ip-10-0-17-165.ec2.internal": 42,
|
||||
"ip-10-0-94-25.ec2.internal": 56,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid data missing instance label",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
"__name__": model.LabelValue("instance:node_cpu:rate:sum"),
|
||||
},
|
||||
Value: model.SampleValue(0.20381818181818104),
|
||||
Timestamp: 1728991761711,
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("The collected metrics sample is missing 'instance' key"),
|
||||
},
|
||||
{
|
||||
name: "invalid data value out of range",
|
||||
dataType: model.ValVector,
|
||||
result: model.Vector{
|
||||
sample("instance:node_cpu:rate:sum", "ip-10-0-51-101.ec2.internal", 1.20381818181818104),
|
||||
},
|
||||
err: fmt.Errorf("The collected metrics sample for \"ip-10-0-51-101.ec2.internal\" has value 1.203818181818181 outside of <0; 1> interval"),
|
||||
},
|
||||
{
|
||||
name: "invalid data not a vector",
|
||||
dataType: model.ValScalar,
|
||||
result: model.Scalar{
|
||||
Value: model.SampleValue(0.20381818181818104),
|
||||
Timestamp: 1728991761711,
|
||||
},
|
||||
err: fmt.Errorf("expected query results to be of type \"vector\", got \"scalar\" instead"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
pClient := &fakePromClient{
|
||||
result: tc.result,
|
||||
dataType: tc.dataType,
|
||||
}
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3, p1, p21, p22, p3)
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Fatalf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
prometheusUsageClient := newPrometheusUsageClient(podsAssignedToNode, pClient, "instance:node_cpu:rate:sum")
|
||||
err = prometheusUsageClient.sync(ctx, nodes)
|
||||
if tc.err == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err == nil {
|
||||
t.Fatalf("unexpected %q error, got nil instead", tc.err)
|
||||
} else if err.Error() != tc.err.Error() {
|
||||
t.Fatalf("expected %q error, got %q instead", tc.err, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeUtil := prometheusUsageClient.nodeUtilization(node.Name)
|
||||
if nodeUtil[MetricResource].Value() != tc.nodeUsage[node.Name] {
|
||||
t.Fatalf("expected %q node utilization to be %v, got %v instead", node.Name, tc.nodeUsage[node.Name], nodeUtil[MetricResource])
|
||||
} else {
|
||||
t.Logf("%v node utilization: %v", node.Name, nodeUtil[MetricResource])
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,7 +30,25 @@ func ValidateHighNodeUtilizationArgs(obj runtime.Object) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// make sure we know about the eviction modes defined by the user.
|
||||
return validateEvictionModes(args.EvictionModes)
|
||||
}
|
||||
|
||||
// validateEvictionModes checks if the eviction modes are valid/known
|
||||
// to the descheduler.
|
||||
func validateEvictionModes(modes []EvictionMode) error {
|
||||
// we are using this approach to make the code more extensible
|
||||
// in the future.
|
||||
validModes := map[EvictionMode]bool{
|
||||
EvictionModeOnlyThresholdingResources: true,
|
||||
}
|
||||
|
||||
for _, mode := range modes {
|
||||
if validModes[mode] {
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("invalid eviction mode %s", mode)
|
||||
}
|
||||
return nil
|
||||
}
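
// Illustrative usage (hypothetical misspelled mode name): unknown modes are
// rejected at validation time rather than surfacing at eviction time.
//
//	err := validateEvictionModes([]EvictionMode{"OnlyThresholdedResources"})
//	// => invalid eviction mode OnlyThresholdedResources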
|
||||
|
||||
@@ -44,6 +62,17 @@ func ValidateLowNodeUtilizationArgs(obj runtime.Object) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if args.MetricsUtilization != nil {
|
||||
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.MetricsServer {
|
||||
return fmt.Errorf("it is not allowed to set both %q source and metricsServer", api.KubernetesMetrics)
|
||||
}
|
||||
if args.MetricsUtilization.Source == api.KubernetesMetrics && args.MetricsUtilization.Prometheus != nil {
|
||||
return fmt.Errorf("prometheus configuration is not allowed to set when source is set to %q", api.KubernetesMetrics)
|
||||
}
|
||||
if args.MetricsUtilization.Source == api.PrometheusMetrics && (args.MetricsUtilization.Prometheus == nil || args.MetricsUtilization.Prometheus.Query == "") {
|
||||
return fmt.Errorf("prometheus query is required when metrics source is set to %q", api.PrometheusMetrics)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -21,164 +21,239 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func TestValidateLowNodeUtilizationPluginConfig(t *testing.T) {
|
||||
extendedResource := v1.ResourceName("example.com/foo")
|
||||
tests := []struct {
|
||||
name string
|
||||
thresholds api.ResourceThresholds
|
||||
targetThresholds api.ResourceThresholds
|
||||
errInfo error
|
||||
name string
|
||||
args *LowNodeUtilizationArgs
|
||||
errInfo error
|
||||
}{
|
||||
{
|
||||
name: "passing invalid thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 120,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds config is not valid: %v", fmt.Errorf(
|
||||
"%v threshold not in [%v, %v] range", v1.ResourceMemory, MinResourcePercentage, MaxResourcePercentage)),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different num of resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourcePods: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' CPU config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 90,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", v1.ResourceCPU),
|
||||
},
|
||||
{
|
||||
name: "only thresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "only targetThresholds configured extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds and targetThresholds configured different extended resources",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
"example.com/bar": 80,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds and targetThresholds configured different resources"),
|
||||
},
|
||||
{
|
||||
name: "thresholds' extended resource config value is greater than targetThresholds'",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 90,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 20,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", extendedResource),
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "passing valid plugin config with extended resource",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
},
|
||||
errInfo: nil,
|
||||
},
|
||||
{
|
||||
name: "setting both kubernetes metrics source and metricsserver",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
MetricsServer: true,
|
||||
Source: api.KubernetesMetrics,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("it is not allowed to set both \"KubernetesMetrics\" source and metricsServer"),
|
||||
},
|
||||
{
|
||||
name: "missing prometheus query",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.PrometheusMetrics,
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("prometheus query is required when metrics source is set to \"Prometheus\""),
|
||||
},
|
||||
{
|
||||
name: "prometheus set when source set to kubernetes metrics",
|
||||
args: &LowNodeUtilizationArgs{
|
||||
Thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 20,
|
||||
v1.ResourceMemory: 20,
|
||||
extendedResource: 20,
|
||||
},
|
||||
TargetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 80,
|
||||
v1.ResourceMemory: 80,
|
||||
extendedResource: 80,
|
||||
},
|
||||
MetricsUtilization: &MetricsUtilization{
|
||||
Source: api.KubernetesMetrics,
|
||||
Prometheus: &Prometheus{},
|
||||
},
|
||||
},
|
||||
errInfo: fmt.Errorf("prometheus configuration is not allowed to set when source is set to \"KubernetesMetrics\""),
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
args := &LowNodeUtilizationArgs{
|
||||
Thresholds: testCase.thresholds,
|
||||
TargetThresholds: testCase.targetThresholds,
|
||||
}
|
||||
validateErr := validateLowNodeUtilizationThresholds(args.Thresholds, args.TargetThresholds, false)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
validateErr := ValidateLowNodeUtilizationArgs(runtime.Object(testCase.args))
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: %q but got %q instead", testCase.errInfo, validateErr)
|
||||
}
|
||||
} else if validateErr.Error() != testCase.errInfo.Error() {
|
||||
t.Errorf("expected validity of plugin config: thresholds %#v targetThresholds %#v to be %v but got %v instead",
|
||||
testCase.thresholds, testCase.targetThresholds, testCase.errInfo, validateErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -37,7 +37,11 @@ func (in *HighNodeUtilizationArgs) DeepCopyInto(out *HighNodeUtilizationArgs) {
			(*out)[key] = val
		}
	}
	out.MetricsUtilization = in.MetricsUtilization
	if in.EvictionModes != nil {
		in, out := &in.EvictionModes, &out.EvictionModes
		*out = make([]EvictionMode, len(*in))
		copy(*out, *in)
	}
	if in.EvictableNamespaces != nil {
		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
		*out = new(api.Namespaces)
@@ -82,12 +86,21 @@ func (in *LowNodeUtilizationArgs) DeepCopyInto(out *LowNodeUtilizationArgs) {
			(*out)[key] = val
		}
	}
	out.MetricsUtilization = in.MetricsUtilization
	if in.MetricsUtilization != nil {
		in, out := &in.MetricsUtilization, &out.MetricsUtilization
		*out = new(MetricsUtilization)
		(*in).DeepCopyInto(*out)
	}
	if in.EvictableNamespaces != nil {
		in, out := &in.EvictableNamespaces, &out.EvictableNamespaces
		*out = new(api.Namespaces)
		(*in).DeepCopyInto(*out)
	}
	if in.EvictionLimits != nil {
		in, out := &in.EvictionLimits, &out.EvictionLimits
		*out = new(api.EvictionLimits)
		(*in).DeepCopyInto(*out)
	}
	return
}

@@ -108,3 +121,24 @@ func (in *LowNodeUtilizationArgs) DeepCopyObject() runtime.Object {
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsUtilization) DeepCopyInto(out *MetricsUtilization) {
	*out = *in
	if in.Prometheus != nil {
		in, out := &in.Prometheus, &out.Prometheus
		*out = new(Prometheus)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsUtilization.
func (in *MetricsUtilization) DeepCopy() *MetricsUtilization {
	if in == nil {
		return nil
	}
	out := new(MetricsUtilization)
	in.DeepCopyInto(out)
	return out
}
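The generated nil guards above exist because a plain struct assignment would leave pointer fields aliased between source and copy; a minimal, self-contained illustration of the same pattern (toy types, not the generated API):

package main

import "fmt"

type inner struct{ n int }

type outer struct{ p *inner }

// deepCopy mirrors the generated pattern: copy the value, then replace each
// non-nil pointer field with a freshly allocated copy.
func (o *outer) deepCopy() *outer {
	out := *o
	if o.p != nil {
		cp := *o.p
		out.p = &cp
	}
	return &out
}

func main() {
	a := &outer{p: &inner{n: 1}}
	b := a.deepCopy()
	b.p.n = 2
	fmt.Println(a.p.n, b.p.n) // 1 2: the copy no longer aliases a.p
}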
@@ -417,18 +417,23 @@ func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable
	// followed by the highest priority pods with affinity or nodeSelector
	sort.Slice(list, func(i, j int) bool {
		// any non-evictable pods should be considered last (ie, first in the list)
		if !isEvictable(list[i]) || !isEvictable(list[j]) {
		evictableI := isEvictable(list[i])
		evictableJ := isEvictable(list[j])

		if !evictableI || !evictableJ {
			// false - i is the only non-evictable, so return true to put it first
			// true - j is non-evictable, so return false to put j before i
			// if true and both are non-evictable, order doesn't matter
			return !(isEvictable(list[i]) && !isEvictable(list[j]))
			return !(evictableI && !evictableJ)
		}

		hasSelectorOrAffinityI := hasSelectorOrAffinity(*list[i])
		hasSelectorOrAffinityJ := hasSelectorOrAffinity(*list[j])
		// if both pods have selectors/affinity, compare them by their priority
		if hasSelectorOrAffinity(*list[i]) == hasSelectorOrAffinity(*list[j]) {
			comparePodsByPriority(list[i], list[j])
		if hasSelectorOrAffinityI == hasSelectorOrAffinityJ {
			// Sort by priority in ascending order (lower priority Pods first)
			return !comparePodsByPriority(list[i], list[j])
		}
		return hasSelectorOrAffinity(*list[i]) && !hasSelectorOrAffinity(*list[j])
		return hasSelectorOrAffinityI && !hasSelectorOrAffinityJ
	})
	sortedTopologies = append(sortedTopologies, topology{pair: pair, pods: list})
}
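The comparator above layers three orderings: non-evictable pods first, then pods with a selector or affinity, then ascending priority. A minimal standalone sketch of the same layering, assuming a hypothetical item type (not descheduler code):

package main

import (
	"fmt"
	"sort"
)

// item is a hypothetical stand-in for a pod, mirroring the three tiers used
// by the comparator above: evictability, selector/affinity presence, priority.
type item struct {
	name      string
	evictable bool
	pinned    bool // stands in for hasSelectorOrAffinity
	priority  int32
}

func main() {
	list := []item{
		{"plain-prio5", true, false, 5},
		{"non-evictable", false, false, 0},
		{"pinned-prio10", true, true, 10},
		{"pinned-prio2", true, true, 2},
	}
	sort.Slice(list, func(i, j int) bool {
		// Tier 1: non-evictable items sort to the front.
		if !list[i].evictable || !list[j].evictable {
			return !(list[i].evictable && !list[j].evictable)
		}
		// Tier 3: equal selector/affinity status falls back to ascending priority.
		if list[i].pinned == list[j].pinned {
			return list[i].priority < list[j].priority
		}
		// Tier 2: items with a selector/affinity sort before those without.
		return list[i].pinned && !list[j].pinned
	})
	for _, it := range list {
		fmt.Println(it.name)
	}
	// Prints: non-evictable, pinned-prio2, pinned-prio10, plain-prio5
}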
@@ -3,6 +3,7 @@ package removepodsviolatingtopologyspreadconstraint

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"testing"
@@ -1476,6 +1477,231 @@ func TestTopologySpreadConstraint(t *testing.T) {
	}
}

func TestSortDomains(t *testing.T) {
	tests := []struct {
		name               string
		constraintTopology map[topologyPair][]*v1.Pod
		want               []topology
	}{
		{
			name:               "empty input",
			constraintTopology: map[topologyPair][]*v1.Pod{},
			want:               []topology{},
		},
		{
			name: "single domain with mixed pods",
			constraintTopology: map[topologyPair][]*v1.Pod{
				{"zone", "a"}: {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "non-evictable-pod",
							Annotations: map[string]string{"evictable": "false"},
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "evictable-with-affinity",
							Annotations: map[string]string{"evictable": "true", "hasSelectorOrAffinity": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](10),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "evictable-high-priority",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](15),
						},
					},
				},
			},
			want: []topology{
				{pair: topologyPair{"zone", "a"}, pods: []*v1.Pod{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "non-evictable-pod",
							Annotations: map[string]string{"evictable": "false"},
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "evictable-with-affinity",
							Annotations: map[string]string{"evictable": "true", "hasSelectorOrAffinity": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](10),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "evictable-high-priority",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](15),
						},
					},
				}},
			},
		},
		{
			name: "multiple domains with different priorities and selectors",
			constraintTopology: map[topologyPair][]*v1.Pod{
				{"zone", "a"}: {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "high-priority-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](20),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "low-priority-no-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](5),
						},
					},
				},
				{"zone", "b"}: {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "medium-priority-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](15),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "non-evictable-pod",
							Annotations: map[string]string{"evictable": "false"},
						},
					},
				},
			},
			want: []topology{
				{pair: topologyPair{"zone", "a"}, pods: []*v1.Pod{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "low-priority-no-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](5),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "high-priority-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](20),
						},
					},
				}},
				{pair: topologyPair{"zone", "b"}, pods: []*v1.Pod{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "non-evictable-pod",
							Annotations: map[string]string{"evictable": "false"},
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "medium-priority-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](15),
						},
					},
				}},
			},
		},
		{
			name: "domain with pods having different selector/affinity",
			constraintTopology: map[topologyPair][]*v1.Pod{
				{"zone", "a"}: {
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "pod-with-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Affinity: &v1.Affinity{
								NodeAffinity: &v1.NodeAffinity{},
							},
							Priority: utilptr.To[int32](10),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "pod-no-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](15),
						},
					},
				},
			},
			want: []topology{
				{pair: topologyPair{"zone", "a"}, pods: []*v1.Pod{
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "pod-with-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Affinity: &v1.Affinity{
								NodeAffinity: &v1.NodeAffinity{},
							},
							Priority: utilptr.To[int32](10),
						},
					},
					{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "pod-no-affinity",
							Annotations: map[string]string{"evictable": "true"},
						},
						Spec: v1.PodSpec{
							Priority: utilptr.To[int32](15),
						},
					},
				}},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			mockIsEvictable := func(pod *v1.Pod) bool {
				if val, exists := pod.Annotations["evictable"]; exists {
					return val == "true"
				}
				return false
			}
			got := sortDomains(tt.constraintTopology, mockIsEvictable)
			sort.Slice(got, func(i, j int) bool {
				return got[i].pair.value < got[j].pair.value
			})
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("sortDomains() = %v, want %v", got, tt.want)
			}
		})
	}
}

type testPodList struct {
	count int
	node  string
@@ -18,6 +18,7 @@ import (
	"fmt"
	"time"

	promapi "github.com/prometheus/client_golang/api"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"

@@ -68,6 +69,7 @@ func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.Ev
// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
	clientSet                 clientset.Interface
	prometheusClient          promapi.Client
	metricsCollector          *metricscollector.MetricsCollector
	getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
	sharedInformerFactory     informers.SharedInformerFactory
@@ -81,6 +83,10 @@ func (hi *handleImpl) ClientSet() clientset.Interface {
	return hi.clientSet
}

func (hi *handleImpl) PrometheusClient() promapi.Client {
	return hi.prometheusClient
}

func (hi *handleImpl) MetricsCollector() *metricscollector.MetricsCollector {
	return hi.metricsCollector
}
@@ -131,6 +137,7 @@ type Option func(*handleImplOpts)

type handleImplOpts struct {
	clientSet                 clientset.Interface
	prometheusClient          promapi.Client
	sharedInformerFactory     informers.SharedInformerFactory
	getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
	podEvictor                *evictions.PodEvictor
@@ -144,6 +151,13 @@ func WithClientSet(clientSet clientset.Interface) Option {
	}
}

// WithPrometheusClient sets Prometheus client for the scheduling frameworkImpl.
func WithPrometheusClient(prometheusClient promapi.Client) Option {
	return func(o *handleImplOpts) {
		o.prometheusClient = prometheusClient
	}
}

func WithSharedInformerFactory(sharedInformerFactory informers.SharedInformerFactory) Option {
	return func(o *handleImplOpts) {
		o.sharedInformerFactory = sharedInformerFactory
@@ -267,6 +281,7 @@ func NewProfile(config api.DeschedulerProfile, reg pluginregistry.Registry, opts
			podEvictor: hOpts.podEvictor,
		},
		metricsCollector: hOpts.metricsCollector,
		prometheusClient: hOpts.prometheusClient,
	}

	pluginNames := append(config.Plugins.Deschedule.Enabled, config.Plugins.Balance.Enabled...)
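For orientation, a small self-contained sketch of constructing the promapi.Client value that the new WithPrometheusClient option expects; the Prometheus address is a placeholder, not taken from this diff:

package main

import (
	"fmt"

	promapi "github.com/prometheus/client_golang/api"
)

func main() {
	// promapi.NewClient builds the promapi.Client that WithPrometheusClient
	// (above) stores on the handle; the address is illustrative only.
	client, err := promapi.NewClient(promapi.Config{Address: "http://prometheus.monitoring.svc:9090"})
	if err != nil {
		fmt.Println("unable to create prometheus client:", err)
		return
	}
	fmt.Printf("client ready: %T\n", client)
}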
@@ -26,6 +26,8 @@ import (
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"

	promapi "github.com/prometheus/client_golang/api"
)

// Handle provides handles used by plugins to retrieve a kubernetes client set,
@@ -34,6 +36,7 @@ import (
type Handle interface {
	// ClientSet returns a kubernetes clientSet.
	ClientSet() clientset.Interface
	PrometheusClient() promapi.Client
	Evictor() Evictor
	GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc
	SharedInformerFactory() informers.SharedInformerFactory
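A hedged sketch of how a plugin could consume the client returned by the new PrometheusClient() handle method; the package name, function name, and PromQL expression are illustrative assumptions, only the client_golang calls are real APIs:

package exampleplugin

import (
	"context"
	"fmt"
	"time"

	promapi "github.com/prometheus/client_golang/api"
	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

// queryNodeCPU runs one instant query against the client a plugin would
// obtain from handle.PrometheusClient(). The PromQL expression is a placeholder.
func queryNodeCPU(ctx context.Context, client promapi.Client) error {
	if client == nil {
		return fmt.Errorf("no prometheus client configured")
	}
	api := promv1.NewAPI(client)
	result, warnings, err := api.Query(ctx, `instance:node_cpu:rate:sum`, time.Now())
	if err != nil {
		return err
	}
	if len(warnings) > 0 {
		fmt.Println("prometheus warnings:", warnings)
	}
	fmt.Println(result.Type(), result.String())
	return nil
}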
@@ -27,7 +27,7 @@ import (
	"go.opentelemetry.io/otel/propagation"
	sdkresource "go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
	"google.golang.org/grpc/credentials"
	"k8s.io/klog/v2"
@@ -121,7 +121,7 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
		klog.V(5).InfoS("no name provided, using default service name for tracing", "name", DefaultServiceName)
		name = DefaultServiceName
	}
	resourceOpts := []sdkresource.Option{sdkresource.WithAttributes(semconv.ServiceNameKey.String(name)), sdkresource.WithSchemaURL(semconv.SchemaURL), sdkresource.WithProcess()}
	resourceOpts := defaultResourceOpts(name)
	if namespace != "" {
		resourceOpts = append(resourceOpts, sdkresource.WithAttributes(semconv.ServiceNamespaceKey.String(namespace)))
	}
@@ -141,6 +141,10 @@ func NewTracerProvider(ctx context.Context, endpoint, caCert, name, namespace st
	return
}

func defaultResourceOpts(name string) []sdkresource.Option {
	return []sdkresource.Option{sdkresource.WithAttributes(semconv.ServiceNameKey.String(name)), sdkresource.WithSchemaURL(semconv.SchemaURL), sdkresource.WithProcess()}
}

// Shutdown shuts down the global trace exporter.
func Shutdown(ctx context.Context) error {
	tp, ok := provider.(*sdktrace.TracerProvider)
17
pkg/tracing/tracing_test.go
Normal file
@@ -0,0 +1,17 @@
package tracing

import (
	"context"
	"testing"

	sdkresource "go.opentelemetry.io/otel/sdk/resource"
)

func TestCreateTraceableResource(t *testing.T) {
	ctx := context.TODO()
	resourceOpts := defaultResourceOpts("descheduler")
	_, err := sdkresource.New(ctx, resourceOpts...)
	if err != nil {
		t.Errorf("error initialising tracer provider: %v", err)
	}
}
@@ -26,6 +26,7 @@ import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/component-helpers/scheduling/corev1"
	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
	"k8s.io/klog/v2"
)

@@ -55,56 +56,13 @@ func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.Set[string
	return true
}

// The following code has been copied from predicates package to avoid the
// huge vendoring issues, mostly copied from
// k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/
// Some minor changes have been made to ease the imports, but most of the code
// remains untouched

// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) (bool, error) {
	if node == nil {
		return false, fmt.Errorf("node not found")
	}
	return podMatchesNodeLabels(pod, node), nil
}

// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
	// Check if node.Labels match pod.Spec.NodeSelector.
	if len(pod.Spec.NodeSelector) > 0 {
		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(node.Labels)) {
			return false
		}
	}

	// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
	// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
	// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
	// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
	// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
	// 6. non-nil empty NodeSelectorRequirement is not allowed

	affinity := pod.Spec.Affinity
	if affinity != nil && affinity.NodeAffinity != nil {
		nodeAffinity := affinity.NodeAffinity
		// if no required NodeAffinity requirements, will do no-op, means select all nodes.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			return true
		}

		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
			klog.V(10).InfoS("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector", "selector", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
			matches, err := corev1.MatchNodeSelectorTerms(node, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
			if err != nil {
				klog.ErrorS(err, "error parsing node selector", "selector", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
			}
			return matches
		}
	}
	return true
	nodeRequiredAffinity := nodeaffinity.GetRequiredNodeAffinity(pod)
	return nodeRequiredAffinity.Match(node)
}

func uniqueSortNodeSelectorTerms(srcTerms []v1.NodeSelectorTerm) []v1.NodeSelectorTerm {
@@ -317,6 +275,7 @@ func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) (int32,
	match, err := corev1.MatchNodeSelectorTerms(node, preferredNodeSelector)
	if err != nil {
		klog.ErrorS(err, "error parsing node selector", "selector", preferredNodeSelector)
		continue
	}
	if match {
		sumWeights += prefSchedulTerm.Weight
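The replacement body delegates both pod.Spec.NodeSelector and required node affinity matching to the upstream component-helpers package; a minimal standalone sketch of that helper (the pod and node values are made up for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: v1.PodSpec{
			NodeSelector: map[string]string{"disktype": "ssd"},
		},
	}
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "node-1",
			Labels: map[string]string{"disktype": "ssd"},
		},
	}
	// GetRequiredNodeAffinity folds pod.Spec.NodeSelector and the pod's
	// required node affinity into one matcher; Match reports whether the
	// node satisfies both.
	matcher := nodeaffinity.GetRequiredNodeAffinity(pod)
	ok, err := matcher.Match(node)
	fmt.Println(ok, err) // true <nil>
}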
@@ -43,7 +43,7 @@ import (

func lowNodeUtilizationPolicy(lowNodeUtilizationArgs *nodeutilization.LowNodeUtilizationArgs, evictorArgs *defaultevictor.DefaultEvictorArgs, metricsCollectorEnabled bool) *apiv1alpha2.DeschedulerPolicy {
	return &apiv1alpha2.DeschedulerPolicy{
		MetricsCollector: apiv1alpha2.MetricsCollector{
		MetricsCollector: &apiv1alpha2.MetricsCollector{
			Enabled: metricsCollectorEnabled,
		},
		Profiles: []apiv1alpha2.DeschedulerProfile{
@@ -111,7 +111,7 @@ func TestLowNodeUtilizationKubernetesMetrics(t *testing.T) {
	testLabel := map[string]string{"app": "test-lownodeutilization-kubernetes-metrics", "name": "test-lownodeutilization-kubernetes-metrics"}
	deploymentObj := buildTestDeployment("lownodeutilization-kubernetes-metrics-pod", testNamespace.Name, 0, testLabel, nil)
	deploymentObj.Spec.Template.Spec.Containers[0].Image = "narmidm/k8s-pod-cpu-stressor:latest"
	deploymentObj.Spec.Template.Spec.Containers[0].Args = []string{"-cpu=3", "-duration=10s", "-forever"}
	deploymentObj.Spec.Template.Spec.Containers[0].Args = []string{"-cpu=1.0", "-duration=10s", "-forever"}
	deploymentObj.Spec.Template.Spec.Containers[0].Resources = v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU: resource.MustParse("3000m"),
@@ -147,8 +147,8 @@ func TestLowNodeUtilizationKubernetesMetrics(t *testing.T) {
					v1.ResourceCPU:  50,
					v1.ResourcePods: 50,
				},
				MetricsUtilization: nodeutilization.MetricsUtilization{
					MetricsServer: true,
				MetricsUtilization: &nodeutilization.MetricsUtilization{
					Source: api.KubernetesMetrics,
				},
			},
			evictorArgs: &defaultevictor.DefaultEvictorArgs{},
@@ -171,8 +171,8 @@ func TestLowNodeUtilizationKubernetesMetrics(t *testing.T) {
					v1.ResourceCPU:  50,
					v1.ResourcePods: 50,
				},
				MetricsUtilization: nodeutilization.MetricsUtilization{
					MetricsServer: true,
				MetricsUtilization: &nodeutilization.MetricsUtilization{
					Source: api.KubernetesMetrics,
				},
			},
			evictorArgs: &defaultevictor.DefaultEvictorArgs{},
@@ -43,7 +43,7 @@ import (

const deploymentReplicas = 4

func tooManyRestartsPolicy(targetNamespace string, podRestartThresholds int32, includingInitContainers bool) *apiv1alpha2.DeschedulerPolicy {
func tooManyRestartsPolicy(targetNamespace string, podRestartThresholds int32, includingInitContainers bool, gracePeriodSeconds int64) *apiv1alpha2.DeschedulerPolicy {
	return &apiv1alpha2.DeschedulerPolicy{
		Profiles: []apiv1alpha2.DeschedulerProfile{
			{
@@ -84,6 +84,7 @@ func tooManyRestartsPolicy(targetNamespace string, podRestartThresholds int32, i
				},
			},
		},
		GracePeriodSeconds: &gracePeriodSeconds,
	}
}

@@ -127,16 +128,17 @@ func TestTooManyRestarts(t *testing.T) {
	tests := []struct {
		name                    string
		policy                  *apiv1alpha2.DeschedulerPolicy
		enableGracePeriod       bool
		expectedEvictedPodCount uint
	}{
		{
			name:                    "test-no-evictions",
			policy:                  tooManyRestartsPolicy(testNamespace.Name, 10000, true),
			policy:                  tooManyRestartsPolicy(testNamespace.Name, 10000, true, 0),
			expectedEvictedPodCount: 0,
		},
		{
			name:                    "test-one-evictions",
			policy:                  tooManyRestartsPolicy(testNamespace.Name, 3, true),
			policy:                  tooManyRestartsPolicy(testNamespace.Name, 3, true, 0),
			expectedEvictedPodCount: 4,
		},
	}
@@ -196,9 +198,8 @@ func TestTooManyRestarts(t *testing.T) {
	if len(deschedulerPods) != 0 {
		deschedulerPodName = deschedulerPods[0].Name
	}

	// Run RemovePodsHavingTooManyRestarts strategy
	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 20*time.Second, true, func(ctx context.Context) (bool, error) {
	if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 50*time.Second, true, func(ctx context.Context) (bool, error) {
		currentRunNames := sets.NewString(getCurrentPodNames(ctx, clientSet, testNamespace.Name, t)...)
		actualEvictedPod := preRunNames.Difference(currentRunNames)
		actualEvictedPodCount := uint(actualEvictedPod.Len())
@@ -210,7 +211,7 @@ func TestTooManyRestarts(t *testing.T) {

		return true, nil
	}); err != nil {
		t.Errorf("Error waiting for descheduler running: %v", err)
		t.Fatalf("Error waiting for descheduler running: %v", err)
	}
	waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
})
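For reference, a self-contained sketch of the wait.PollUntilContextTimeout call pattern the test relies on, using the new 50-second budget; the condition body is a placeholder, not the test's eviction-count check:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	// Poll every second for up to 50s; `true` means the condition also runs
	// immediately instead of waiting one interval first.
	err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 50*time.Second, true,
		func(ctx context.Context) (bool, error) {
			return time.Now().Second()%2 == 0, nil // placeholder condition
		})
	fmt.Println("poll finished:", err)
}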
@@ -38,12 +38,12 @@ echo "DESCHEDULER_IMAGE: ${DESCHEDULER_IMAGE}"

# This just runs e2e tests.
if [ -n "$KIND_E2E" ]; then
    K8S_VERSION=${KUBERNETES_VERSION:-v1.32.0}
    K8S_VERSION=${KUBERNETES_VERSION:-v1.33.0}
    if [ -z "${SKIP_KUBECTL_INSTALL}" ]; then
        curl -Lo kubectl https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
    fi
    if [ -z "${SKIP_KIND_INSTALL}" ]; then
        wget https://github.com/kubernetes-sigs/kind/releases/download/v0.26.0/kind-linux-amd64
        wget https://github.com/kubernetes-sigs/kind/releases/download/v0.27.0/kind-linux-amd64
        chmod +x kind-linux-amd64
        mv kind-linux-amd64 kind
        export PATH=$PATH:$PWD
82
vendor/github.com/go-logfmt/logfmt/README.md
generated
vendored
@@ -1,41 +1,41 @@
# logfmt

[](https://pkg.go.dev/github.com/go-logfmt/logfmt)
[](https://goreportcard.com/report/go-logfmt/logfmt)
[](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml)
[](https://coveralls.io/github/go-logfmt/logfmt?branch=main)

Package logfmt implements utilities to marshal and unmarshal data in the [logfmt
format][fmt]. It provides an API similar to [encoding/json][json] and
[encoding/xml][xml].

[fmt]: https://brandur.org/logfmt
[json]: https://pkg.go.dev/encoding/json
[xml]: https://pkg.go.dev/encoding/xml

The logfmt format was first documented by Brandur Leach in [this
article][origin]. The format has not been formally standardized. The most
authoritative public specification to date has been the documentation of a Go
Language [package][parser] written by Blake Mizerany and Keith Rarick.

[origin]: https://brandur.org/logfmt
[parser]: https://pkg.go.dev/github.com/kr/logfmt

## Goals

This project attempts to conform as closely as possible to the prior art, while
also removing ambiguity where necessary to provide well behaved encoder and
decoder implementations.

## Non-goals

This project does not attempt to formally standardize the logfmt format. In the
event that logfmt is standardized this project would take conforming to the
standard as a goal.

## Versioning

This project publishes releases according to the Go language guidelines for
[developing and publishing modules][pub].

[pub]: https://go.dev/doc/modules/developing
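Since the README describes an encoding/json-style API, here is a minimal, hedged sketch of encoding and decoding one logfmt record (the key/value content is illustrative):

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	// Encode one logfmt record to stdout.
	enc := logfmt.NewEncoder(os.Stdout)
	enc.EncodeKeyval("level", "info")
	enc.EncodeKeyval("msg", "pod evicted")
	enc.EndRecord() // prints: level=info msg="pod evicted"

	// Decode it back, key/value pair by pair.
	dec := logfmt.NewDecoder(strings.NewReader(`level=info msg="pod evicted"`))
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			fmt.Printf("%s=%s\n", dec.Key(), dec.Value())
		}
	}
	if err := dec.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}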
2
vendor/github.com/golang/glog/glog.go
generated
vendored
@@ -76,7 +76,7 @@
//	-log_backtrace_at=gopherflakes.go:234
//		A stack trace will be written to the Info log whenever execution
//		hits one of these statements. (Unlike with -vmodule, the ".go"
//		must bepresent.)
//		must be present.)
//	-v=0
//		Enable V-leveled logging at the specified level.
//	-vmodule=""
71
vendor/github.com/golang/glog/glog_file.go
generated
vendored
@@ -26,7 +26,6 @@ import (
	"fmt"
	"io"
	"os"
	"os/user"
	"path/filepath"
	"runtime"
	"strings"
@@ -68,9 +67,8 @@ func init() {
		host = shortHostname(h)
	}

	current, err := user.Current()
	if err == nil {
		userName = current.Username
	if u := lookupUser(); u != "" {
		userName = u
	}
	// Sanitize userName since it is used to construct file paths.
	userName = strings.Map(func(r rune) rune {
@@ -118,32 +116,53 @@ var onceLogDirs sync.Once
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
func create(tag string, t time.Time, dir string) (f *os.File, filename string, err error) {
	if dir != "" {
		f, name, err := createInDir(dir, tag, t)
		if err == nil {
			return f, name, err
		}
		return nil, "", fmt.Errorf("log: cannot create log: %v", err)
	}

	onceLogDirs.Do(createLogDirs)
	if len(logDirs) == 0 {
		return nil, "", errors.New("log: no log dirs")
	}
	name, link := logName(tag, t)
	var lastErr error
	for _, dir := range logDirs {
		fname := filepath.Join(dir, name)
		f, err := os.Create(fname)
		f, name, err := createInDir(dir, tag, t)
		if err == nil {
			symlink := filepath.Join(dir, link)
			os.Remove(symlink)        // ignore err
			os.Symlink(name, symlink) // ignore err
			if *logLink != "" {
				lsymlink := filepath.Join(*logLink, link)
				os.Remove(lsymlink)         // ignore err
				os.Symlink(fname, lsymlink) // ignore err
			}
			return f, fname, nil
			return f, name, err
		}
		lastErr = err
	}
	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}

func createInDir(dir, tag string, t time.Time) (f *os.File, name string, err error) {
	name, link := logName(tag, t)
	fname := filepath.Join(dir, name)
	// O_EXCL is important here, as it prevents a vulnerability. The general idea is that logs often
	// live in an insecure directory (like /tmp), so an unprivileged attacker could create fname in
	// advance as a symlink to a file the logging process can access, but the attacker cannot. O_EXCL
	// fails the open if it already exists, thus preventing this code from opening the existing file
	// the attacker points us to.
	f, err = os.OpenFile(fname, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err == nil {
		symlink := filepath.Join(dir, link)
		os.Remove(symlink)        // ignore err
		os.Symlink(name, symlink) // ignore err
		if *logLink != "" {
			lsymlink := filepath.Join(*logLink, link)
			os.Remove(lsymlink)         // ignore err
			os.Symlink(fname, lsymlink) // ignore err
		}
		return f, fname, nil
	}
	return nil, "", err
}

// flushSyncWriter is the interface satisfied by logging destinations.
type flushSyncWriter interface {
	Flush() error
@@ -160,7 +179,10 @@ var sinks struct {
func init() {
	// Register stderr first: that way if we crash during file-writing at least
	// the log will have gone somewhere.
	logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file)
	if shouldRegisterStderrSink() {
		logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr)
	}
	logsink.TextSinks = append(logsink.TextSinks, &sinks.file)

	sinks.file.flushChan = make(chan logsink.Severity, 1)
	go sinks.file.flushDaemon()
@@ -247,6 +269,7 @@ type syncBuffer struct {
	names  []string
	sev    logsink.Severity
	nbytes uint64 // The number of bytes written to this file
	madeAt time.Time
}

func (sb *syncBuffer) Sync() error {
@@ -254,9 +277,14 @@ func (sb *syncBuffer) Sync() error {
}

func (sb *syncBuffer) Write(p []byte) (n int, err error) {
	// Rotate the file if it is too large, but ensure we only do so,
	// if rotate doesn't create a conflicting filename.
	if sb.nbytes+uint64(len(p)) >= MaxSize {
		if err := sb.rotateFile(time.Now()); err != nil {
			return 0, err
		now := timeNow()
		if now.After(sb.madeAt.Add(1*time.Second)) || now.Second() != sb.madeAt.Second() {
			if err := sb.rotateFile(now); err != nil {
				return 0, err
			}
		}
	}
	n, err = sb.Writer.Write(p)
@@ -274,7 +302,8 @@ const footer = "\nCONTINUED IN NEXT FILE\n"
func (sb *syncBuffer) rotateFile(now time.Time) error {
	var err error
	pn := "<none>"
	file, name, err := create(sb.sev.String(), now)
	file, name, err := create(sb.sev.String(), now, "")
	sb.madeAt = now

	if sb.file != nil {
		// The current log file becomes the previous log at the end of
19
vendor/github.com/golang/glog/glog_file_nonwindows.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
//go:build !windows

package glog

import "os/user"

// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr.
// Today, this always returns true on non-Windows platforms, as it specifically checks for a
// condition that is only present on Windows.
func shouldRegisterStderrSink() bool {
	return true
}

func lookupUser() string {
	if current, err := user.Current(); err == nil {
		return current.Username
	}
	return ""
}
43
vendor/github.com/golang/glog/glog_file_windows.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
//go:build windows

package glog

import (
	"os"
	"syscall"
)

// shouldRegisterStderrSink determines whether we should register a log sink that writes to stderr.
// Today, this checks if stderr is "valid", in that it maps to a non-NULL Handle.
// Windows Services are spawned without Stdout and Stderr, so any attempt to use them equates to
// referencing an invalid file Handle.
// os.Stderr's FD is derived from a call to `syscall.GetStdHandle(syscall.STD_ERROR_HANDLE)`.
// Documentation[1] for the GetStdHandle function indicates the return value may be NULL if the
// application lacks the standard handle, so consider Stderr valid if its FD is non-NULL.
// [1]: https://learn.microsoft.com/en-us/windows/console/getstdhandle
func shouldRegisterStderrSink() bool {
	return os.Stderr.Fd() != 0
}

// This follows the logic in the standard library's user.Current() function, except
// that it leaves out the potentially expensive calls required to look up the user's
// display name in Active Directory.
func lookupUser() string {
	token, err := syscall.OpenCurrentProcessToken()
	if err != nil {
		return ""
	}
	defer token.Close()
	tokenUser, err := token.GetTokenUser()
	if err != nil {
		return ""
	}
	username, _, accountType, err := tokenUser.User.Sid.LookupAccount("")
	if err != nil {
		return ""
	}
	if accountType != syscall.SidTypeUser {
		return ""
	}
	return username
}
180
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
@@ -1,180 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ptypes

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"

	anypb "github.com/golang/protobuf/ptypes/any"
)

const urlPrefix = "type.googleapis.com/"

// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
//
// Deprecated: Call the any.MessageName method instead.
func AnyMessageName(any *anypb.Any) (string, error) {
	name, err := anyMessageName(any)
	return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	name := protoreflect.FullName(any.TypeUrl)
	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
		name = name[i+len("/"):]
	}
	if !name.IsValid() {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return name, nil
}

// MarshalAny marshals the given message m into an anypb.Any message.
//
// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
	switch dm := m.(type) {
	case DynamicAny:
		m = dm.Message
	case *DynamicAny:
		if dm == nil {
			return nil, proto.ErrNil
		}
		m = dm.Message
	}
	b, err := proto.Marshal(m)
	if err != nil {
		return nil, err
	}
	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}

// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
	name, err := anyMessageName(any)
	if err != nil {
		return nil, err
	}
	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
	if err != nil {
		return nil, err
	}
	return proto.MessageV1(mt.New().Interface()), nil
}

// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
//
// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
	if dm, ok := m.(*DynamicAny); ok {
		if dm.Message == nil {
			var err error
			dm.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		m = dm.Message
	}

	anyName, err := AnyMessageName(any)
	if err != nil {
		return err
	}
	msgName := proto.MessageName(m)
	if anyName != msgName {
		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
	}
	return proto.Unmarshal(any.Value, m)
}

// Is reports whether the Any message contains a message of the specified type.
//
// Deprecated: Call the any.MessageIs method instead.
func Is(any *anypb.Any, m proto.Message) bool {
	if any == nil || m == nil {
		return false
	}
	name := proto.MessageName(m)
	if !strings.HasSuffix(any.TypeUrl, name) {
		return false
	}
	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
//
//	var x ptypes.DynamicAny
//	if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//	fmt.Printf("unmarshaled message: %v", x.Message)
//
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
// the any message contents into a new instance of the underlying message.
type DynamicAny struct{ proto.Message }

func (m DynamicAny) String() string {
	if m.Message == nil {
		return "<nil>"
	}
	return m.Message.String()
}
func (m DynamicAny) Reset() {
	if m.Message == nil {
		return
	}
	m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
	return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
	if m.Message == nil {
		return nil
	}
	return dynamicAny{proto.MessageReflect(m.Message)}
}

type dynamicAny struct{ protoreflect.Message }

func (m dynamicAny) Type() protoreflect.MessageType {
	return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
	return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
	return DynamicAny{proto.MessageV1(m.Message.Interface())}
}

type dynamicAnyType struct{ protoreflect.MessageType }

func (t dynamicAnyType) New() protoreflect.Message {
	return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
	return dynamicAny{t.MessageType.Zero()}
}
62
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
@@ -1,62 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto

package any

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	anypb "google.golang.org/protobuf/types/known/anypb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/any.proto.

type Any = anypb.Any

var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor

var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
	0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
	0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
	0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
	0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
	0x74, 0x6f, 0x33,
}

var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
	if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
	file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}
10
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
@@ -1,10 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ptypes provides functionality for interacting with well-known types.
//
// Deprecated: Well-known types have specialized functionality directly
// injected into the generated packages for each message type.
// See the deprecation notice for each function for the suggested alternative.
package ptypes
76
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
@@ -1,76 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ptypes

import (
	"errors"
	"fmt"
	"time"

	durationpb "github.com/golang/protobuf/ptypes/duration"
)

// Range of google.protobuf.Duration as specified in duration.proto.
// This is about 10,000 years in seconds.
const (
	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
	minSeconds = -maxSeconds
)

// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
//
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
	if err := validateDuration(dur); err != nil {
		return 0, err
	}
	d := time.Duration(dur.Seconds) * time.Second
	if int64(d/time.Second) != dur.Seconds {
		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
	}
	if dur.Nanos != 0 {
		d += time.Duration(dur.Nanos) * time.Nanosecond
		if (d < 0) != (dur.Nanos < 0) {
			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
		}
	}
	return d, nil
}

// DurationProto converts a time.Duration to a durationpb.Duration.
//
// Deprecated: Call the durationpb.New function instead.
func DurationProto(d time.Duration) *durationpb.Duration {
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	nanos -= secs * 1e9
	return &durationpb.Duration{
		Seconds: int64(secs),
		Nanos:   int32(nanos),
	}
}

// validateDuration determines whether the durationpb.Duration is valid
// according to the definition in google/protobuf/duration.proto.
// A valid durpb.Duration may still be too large to fit into a time.Duration
// Note that the range of durationpb.Duration is about 10,000 years,
// while the range of time.Duration is about 290 years.
func validateDuration(dur *durationpb.Duration) error {
	if dur == nil {
		return errors.New("duration: nil Duration")
	}
	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
		return fmt.Errorf("duration: %v: seconds out of range", dur)
	}
	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
		return fmt.Errorf("duration: %v: nanos out of range", dur)
	}
	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
	}
	return nil
}
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
@@ -1,63 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto

package duration

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	durationpb "google.golang.org/protobuf/types/known/durationpb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/duration.proto.

type Duration = durationpb.Duration

var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor

var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}
112
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
@@ -1,112 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ptypes

import (
	"errors"
	"fmt"
	"time"

	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)

// Range of google.protobuf.Duration as specified in timestamp.proto.
const (
	// Seconds field of the earliest valid Timestamp.
	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	minValidSeconds = -62135596800
	// Seconds field just after the latest valid Timestamp.
	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	maxValidSeconds = 253402300800
)

// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
//
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	return t, validateTimestamp(ts)
}

// TimestampNow returns a google.protobuf.Timestamp for the current time.
//
// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
	ts, err := TimestampProto(time.Now())
	if err != nil {
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return ts
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
//
// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
	ts := &timestamppb.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}

// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
//
// Deprecated: Call the ts.AsTime method instead,
// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
	t, err := Timestamp(ts)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return t.Format(time.RFC3339Nano)
}

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
	if ts.Seconds < minValidSeconds {
		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
	}
	if ts.Seconds >= maxValidSeconds {
		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
	}
	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
@@ -1,64 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto

package timestamp

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
	reflect "reflect"
)

// Symbols defined in public import of google/protobuf/timestamp.proto.

type Timestamp = timestamppb.Timestamp

var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor

var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}

var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
	0, // [0:0] is the sub-list for method output_type
	0, // [0:0] is the sub-list for method input_type
	0, // [0:0] is the sub-list for extension type_name
	0, // [0:0] is the sub-list for extension extendee
	0, // [0:0] is the sub-list for field type_name
}

func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   0,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
	}.Build()
	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}
1
vendor/github.com/google/btree/.travis.yml
generated
vendored
@@ -1 +0,0 @@
language: go
2
vendor/github.com/google/btree/README.md
generated
vendored
@@ -1,7 +1,5 @@
# BTree implementation for Go

![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)

This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
5
vendor/github.com/google/btree/btree.go
generated
vendored
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.18
// +build !go1.18

// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
@@ -476,7 +479,7 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove)
	child := n.mutableChild(i)
	// merge with right child
	mergeItem := n.items.removeAt(i)
	mergeChild := n.children.removeAt(i + 1)
	mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow)
	child.items = append(child.items, mergeItem)
	child.items = append(child.items, mergeChild.items...)
	child.children = append(child.children, mergeChild.children...)
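The one-line btree.go change above closes a copy-on-write hole: the right-hand child pulled out during a merge was mutated without first being cloned into the current tree's COW context. A rough sketch of the invariant the fix enforces (cowCtx, owner, and items are illustrative names, not btree's internals):

// Before mutating a node reached from a possibly shared tree, take ownership.
type cowCtx struct{ _ [0]byte }

type node struct {
	owner *cowCtx
	items []int
}

// mutableFor mirrors the pattern used by the fix: reuse the node when this
// context already owns it, otherwise mutate a private copy.
func (n *node) mutableFor(ctx *cowCtx) *node {
	if n.owner == ctx {
		return n
	}
	return &node{owner: ctx, items: append([]int(nil), n.items...)}
}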
1083
vendor/github.com/google/btree/btree_generic.go
generated
vendored
Normal file
File diff suppressed because it is too large
10
vendor/github.com/google/cel-go/cel/env.go
generated
vendored
@@ -217,7 +217,7 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
	chk, err := e.initChecker()
	if err != nil {
		errs := common.NewErrors(ast.Source())
		errs.ReportError(common.NoLocation, err.Error())
		errs.ReportErrorString(common.NoLocation, err.Error())
		return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
	}

@@ -556,7 +556,8 @@ func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
// Ast format and then Program again.
func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
	pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State())
	ast := a.NativeRep()
	pruned := interpreter.PruneAst(ast.Expr(), ast.SourceInfo().MacroCalls(), details.State())
	newAST := &Ast{source: a.Source(), impl: pruned}
	expr, err := AstToString(newAST)
	if err != nil {
@@ -582,7 +583,7 @@ func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...ch
	extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
	extendedOpts = append(extendedOpts, opts...)
	extendedOpts = append(extendedOpts, e.costOptions...)
	return checker.Cost(ast.impl, estimator, extendedOpts...)
	return checker.Cost(ast.NativeRep(), estimator, extendedOpts...)
}

// configure applies a series of EnvOptions to the current environment.
@@ -614,6 +615,9 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) {
	if e.HasFeature(featureVariadicLogicalASTs) {
		prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
	}
	if e.HasFeature(featureIdentEscapeSyntax) {
		prsrOpts = append(prsrOpts, parser.EnableIdentEscapeSyntax(true))
	}
	e.prsr, err = parser.NewParser(prsrOpts...)
	if err != nil {
		return nil, err
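The EstimateCost hunk above only swaps the private ast.impl access for ast.NativeRep(); the public entry point is unchanged. A sketch of calling it with a no-op estimator that satisfies the checker.CostEstimator interface shown later in this diff (noHints is a made-up name for illustration):

import "github.com/google/cel-go/checker"

type noHints struct{}

// Returning nil defers entirely to CEL's built-in size and cost heuristics.
func (noHints) EstimateSize(element checker.AstNode) *checker.SizeEstimate { return nil }

func (noHints) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
	return nil
}

// Usage, given a checked *cel.Ast:
//   cost, err := env.EstimateCost(checkedAst, noHints{})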
2
vendor/github.com/google/cel-go/cel/inlining.go
generated
vendored
@@ -60,7 +60,7 @@ func NewInlineVariable(name string, definition *Ast) *InlineVariable {
// If the variable occurs more than once, the provided alias will be used to replace the expressions
// where the variable name occurs.
func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
	return &InlineVariable{name: name, alias: alias, def: definition.impl}
	return &InlineVariable{name: name, alias: alias, def: definition.NativeRep()}
}

// NewInliningOptimizer creates and optimizer which replaces variables with expression definitions.
14
vendor/github.com/google/cel-go/cel/io.go
generated
vendored
@@ -62,7 +62,7 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
	if !a.IsChecked() {
		return nil, fmt.Errorf("cannot convert unchecked ast")
	}
	return ast.ToProto(a.impl)
	return ast.ToProto(a.NativeRep())
}

// ParsedExprToAst converts a parsed expression proto message to an Ast.
@@ -99,15 +99,17 @@ func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
// Note, the conversion may not be an exact replica of the original expression, but will produce
// a string that is semantically equivalent and whose textual representation is stable.
func AstToString(a *Ast) (string, error) {
	return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo())
	return parser.Unparse(a.NativeRep().Expr(), a.NativeRep().SourceInfo())
}

// RefValueToValue converts between ref.Val and api.expr.Value.
// RefValueToValue converts between ref.Val and google.api.expr.v1alpha1.Value.
// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
	return ValueAsAlphaProto(res)
}

// ValueAsAlphaProto converts between ref.Val and google.api.expr.v1alpha1.Value.
// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) {
	canonical, err := ValueAsProto(res)
	if err != nil {
@@ -118,6 +120,8 @@ func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) {
	return alpha, err
}

// ValueAsProto converts between ref.Val and cel.expr.Value.
// The result Value is the serialized proto form. The ref.Val must not be error or unknown.
func ValueAsProto(res ref.Val) (*celpb.Value, error) {
	switch res.Type() {
	case types.BoolType:
@@ -205,11 +209,11 @@ var (
	anyPbType = reflect.TypeOf(&anypb.Any{})
)

// ValueToRefValue converts between exprpb.Value and ref.Val.
// ValueToRefValue converts between google.api.expr.v1alpha1.Value and ref.Val.
func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
	return AlphaProtoAsValue(adapter, v)
}

// AlphaProtoAsValue converts between google.api.expr.v1alpha1.Value and ref.Val.
func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) {
	canonical := &celpb.Value{}
	if err := convertProto(v, canonical); err != nil {
@@ -218,6 +223,7 @@ func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error)
	return ProtoAsValue(adapter, canonical)
}

// ProtoAsValue converts between cel.expr.Value and ref.Val.
func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) {
	switch v.Kind.(type) {
	case *celpb.Value_NullValue:
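AstToString and the ValueAsProto family above round-trip between Ast/ref.Val and their proto forms. A short usage sketch for the unparse path (the demo function is illustrative):

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func unparseDemo() error {
	env, err := cel.NewEnv()
	if err != nil {
		return err
	}
	ast, iss := env.Parse("1 + 2 * 3")
	if iss.Err() != nil {
		return iss.Err()
	}
	// Unparses via the AST's native representation; the output is semantically
	// equivalent to the source, with a stable textual form.
	str, err := cel.AstToString(ast)
	if err != nil {
		return err
	}
	fmt.Println(str)
	return nil
}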
112
vendor/github.com/google/cel-go/cel/library.go
generated
vendored
@@ -15,6 +15,7 @@
package cel

import (
	"fmt"
	"math"
	"strconv"
	"strings"
@@ -35,9 +36,11 @@ const (
	optMapMacro                = "optMap"
	optFlatMapMacro            = "optFlatMap"
	hasValueFunc               = "hasValue"
	unwrapOptFunc              = "unwrapOpt"
	optionalNoneFunc           = "optional.none"
	optionalOfFunc             = "optional.of"
	optionalOfNonZeroValueFunc = "optional.ofNonZeroValue"
	optionalUnwrapFunc         = "optional.unwrap"
	valueFunc                  = "value"
	unusedIterVar              = "#unused"
)
@@ -260,6 +263,37 @@ func (stdLibrary) ProgramOptions() []ProgramOption {
// be expressed with `optMap`.
//
//	msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.

// # First
//
// Introduced in version: 2
//
// Returns an optional with the first value from the right hand list, or
// optional.None.
//
//	[1, 2, 3].first().value() == 1

// # Last
//
// Introduced in version: 2
//
// Returns an optional with the last value from the right hand list, or
// optional.None.
//
//	[1, 2, 3].last().value() == 3
//
// This is syntactic sugar for msg.elements[msg.elements.size()-1].

// # Unwrap / UnwrapOpt
//
// Introduced in version: 2
//
// Returns a list of all the values that are not none in the input list of optional values.
// Can be used as optional.unwrap(List[T]) or with postfix notation: List[T].unwrapOpt()
//
//	optional.unwrap([optional.of(42), optional.none()]) == [42]
//	[optional.of(42), optional.none()].unwrapOpt() == [42]

func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
	lib := &optionalLib{version: math.MaxUint32}
	for _, opt := range opts {
@@ -303,6 +337,7 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
	optionalTypeV := OptionalType(paramTypeV)
	listTypeV := ListType(paramTypeV)
	mapTypeKV := MapType(paramTypeK, paramTypeV)
	listOptionalTypeV := ListType(optionalTypeV)

	opts := []EnvOption{
		// Enable the optional syntax in the parser.
@@ -375,6 +410,46 @@ func (lib *optionalLib) CompileOptions() []EnvOption {
	if lib.version >= 1 {
		opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
	}

	if lib.version >= 2 {
		opts = append(opts, Function("last",
			MemberOverload("list_last", []*Type{listTypeV}, optionalTypeV,
				UnaryBinding(func(v ref.Val) ref.Val {
					list := v.(traits.Lister)
					sz := list.Size().Value().(int64)

					if sz == 0 {
						return types.OptionalNone
					}

					return types.OptionalOf(list.Get(types.Int(sz - 1)))
				}),
			),
		))

		opts = append(opts, Function("first",
			MemberOverload("list_first", []*Type{listTypeV}, optionalTypeV,
				UnaryBinding(func(v ref.Val) ref.Val {
					list := v.(traits.Lister)
					sz := list.Size().Value().(int64)

					if sz == 0 {
						return types.OptionalNone
					}

					return types.OptionalOf(list.Get(types.Int(0)))
				}),
			),
		))

		opts = append(opts, Function(optionalUnwrapFunc,
			Overload("optional_unwrap", []*Type{listOptionalTypeV}, listTypeV,
				UnaryBinding(optUnwrap))))
		opts = append(opts, Function(unwrapOptFunc,
			MemberOverload("optional_unwrapOpt", []*Type{listOptionalTypeV}, listTypeV,
				UnaryBinding(optUnwrap))))
	}

	return opts
}

@@ -439,6 +514,23 @@ func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Exp
	), nil
}

func optUnwrap(value ref.Val) ref.Val {
	list := value.(traits.Lister)
	var unwrappedList []ref.Val
	iter := list.Iterator()
	for iter.HasNext() == types.True {
		val := iter.Next()
		opt, isOpt := val.(*types.Optional)
		if !isOpt {
			return types.WrapErr(fmt.Errorf("value %v is not optional", val))
		}
		if opt.HasValue() {
			unwrappedList = append(unwrappedList, opt.GetValue())
		}
	}
	return types.DefaultTypeAdapter.NativeToValue(unwrappedList)
}

func enableOptionalSyntax() EnvOption {
	return func(e *Env) (*Env, error) {
		e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true))
@@ -677,7 +769,7 @@ var (
func timestampGetFullYear(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Year())
}
@@ -685,7 +777,7 @@ func timestampGetFullYear(ts, tz ref.Val) ref.Val {
func timestampGetMonth(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	// CEL spec indicates that the month should be 0-based, but the Time value
	// for Month() is 1-based.
@@ -695,7 +787,7 @@ func timestampGetMonth(ts, tz ref.Val) ref.Val {
func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.YearDay() - 1)
}
@@ -703,7 +795,7 @@ func timestampGetDayOfYear(ts, tz ref.Val) ref.Val {
func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Day() - 1)
}
@@ -711,7 +803,7 @@ func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val {
func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Day())
}
@@ -719,7 +811,7 @@ func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val {
func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Weekday())
}
@@ -727,7 +819,7 @@ func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val {
func timestampGetHours(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Hour())
}
@@ -735,7 +827,7 @@ func timestampGetHours(ts, tz ref.Val) ref.Val {
func timestampGetMinutes(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Minute())
}
@@ -743,7 +835,7 @@ func timestampGetMinutes(ts, tz ref.Val) ref.Val {
func timestampGetSeconds(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Second())
}
@@ -751,7 +843,7 @@ func timestampGetSeconds(ts, tz ref.Val) ref.Val {
func timestampGetMilliseconds(ts, tz ref.Val) ref.Val {
	t, err := inTimeZone(ts, tz)
	if err != nil {
		return types.NewErr(err.Error())
		return types.NewErrFromString(err.Error())
	}
	return types.Int(t.Nanosecond() / 1000000)
}
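Version 2 of the optional library above adds first(), last(), and optional.unwrap()/unwrapOpt(). A sketch of enabling it, using the OptionalTypesVersion option and an expression taken from the doc comments (the wrapper function is illustrative):

import "github.com/google/cel-go/cel"

func optionalV2Demo() error {
	env, err := cel.NewEnv(cel.OptionalTypes(cel.OptionalTypesVersion(2)))
	if err != nil {
		return err
	}
	ast, iss := env.Compile(`[optional.of(42), optional.none()].unwrapOpt() == [42]`)
	if iss.Err() != nil {
		return iss.Err()
	}
	prg, err := env.Program(ast)
	if err != nil {
		return err
	}
	// Evaluates to true: the none() entry is dropped during unwrap.
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		return err
	}
	_ = out
	return nil
}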
6
vendor/github.com/google/cel-go/cel/optimizer.go
generated
vendored
@@ -48,8 +48,8 @@ func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
// If issues are encountered, the Issues.Err() return value will be non-nil.
func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
	// Make a copy of the AST to be optimized.
	optimized := ast.Copy(a.impl)
	ids := newIDGenerator(ast.MaxID(a.impl))
	optimized := ast.Copy(a.NativeRep())
	ids := newIDGenerator(ast.MaxID(a.NativeRep()))

	// Create the optimizer context, could be pooled in the future.
	issues := NewIssues(common.NewErrors(a.Source()))
@@ -86,7 +86,7 @@ func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
		if iss.Err() != nil {
			return nil, iss
		}
		optimized = checked.impl
		optimized = checked.NativeRep()
	}

	// Return the optimized result.
18
vendor/github.com/google/cel-go/cel/options.go
generated
vendored
@@ -65,6 +65,9 @@ const (
	// Enable error generation when a presence test or optional field selection is
	// performed on a primitive type.
	featureEnableErrorOnBadPresenceTest

	// Enable escape syntax for field identifiers (`).
	featureIdentEscapeSyntax
)

// EnvOption is a functional interface for configuring the environment.
@@ -618,6 +621,12 @@ func EnableMacroCallTracking() EnvOption {
	return features(featureEnableMacroCallTracking, true)
}

// EnableIdentifierEscapeSyntax enables identifier escaping (`) syntax for
// fields.
func EnableIdentifierEscapeSyntax() EnvOption {
	return features(featureIdentEscapeSyntax, true)
}

// CrossTypeNumericComparisons makes it possible to compare across numeric types, e.g. double < int
func CrossTypeNumericComparisons(enabled bool) EnvOption {
	return features(featureCrossTypeNumericComparisons, enabled)
@@ -655,6 +664,15 @@ func ParserExpressionSizeLimit(limit int) EnvOption {
	}
}

// EnableHiddenAccumulatorName sets the parser to use the identifier '@result' for accumulators
// which is not normally accessible from CEL source.
func EnableHiddenAccumulatorName(enabled bool) EnvOption {
	return func(e *Env) (*Env, error) {
		e.prsrOpts = append(e.prsrOpts, parser.EnableHiddenAccumulatorName(enabled))
		return e, nil
	}
}

func maybeInteropProvider(provider any) (types.Provider, error) {
	switch p := provider.(type) {
	case types.Provider:
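Both options added above are ordinary EnvOptions, so enabling them is just a matter of passing them at environment construction (a minimal sketch):

env, err := cel.NewEnv(
	cel.EnableIdentifierEscapeSyntax(),    // backtick-escaped field identifiers
	cel.EnableHiddenAccumulatorName(true), // '@result' accumulator, hidden from CEL source
)
if err != nil {
	// handle configuration error
}
_ = env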
3
vendor/github.com/google/cel-go/cel/program.go
generated
vendored
@@ -100,6 +100,9 @@ type EvalDetails struct {
// State of the evaluation, non-nil if the OptTrackState or OptExhaustiveEval is specified
// within EvalOptions.
func (ed *EvalDetails) State() interpreter.EvalState {
	if ed == nil {
		return interpreter.NewEvalState()
	}
	return ed.state
}
6
vendor/github.com/google/cel-go/checker/checker.go
generated
vendored
@@ -529,9 +529,15 @@ func (c *checker) checkComprehension(e ast.Expr) {
		c.isAssignable(types.DynType, rangeType)
		// Set the range iteration variable to type DYN as well.
		varType = types.DynType
		if comp.HasIterVar2() {
			var2Type = types.DynType
		}
	default:
		c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType)
		varType = types.ErrorType
		if comp.HasIterVar2() {
			var2Type = types.ErrorType
		}
	}

	// Create a block scope for the loop.
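The checker hunk above assigns a type to the second iteration variable (iterVar2) when a comprehension declares one. Two-variable comprehensions come from macros layered on top of the core language; a hedged sketch, assuming the TwoVarComprehensions extension and its transformMapEntry macro from cel-go/ext:

import (
	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func twoVarDemo() (*cel.Ast, error) {
	env, err := cel.NewEnv(ext.TwoVarComprehensions())
	if err != nil {
		return nil, err
	}
	// k is the iterVar, v is the iterVar2; both get typed by checkComprehension.
	ast, iss := env.Compile(`{"a": 1, "b": 2}.transformMapEntry(k, v, {k: v * 2})`)
	if iss.Err() != nil {
		return nil, iss.Err()
	}
	return ast, nil
}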
654
vendor/github.com/google/cel-go/checker/cost.go
generated
vendored
@@ -28,15 +28,20 @@ import (

// CostEstimator estimates the sizes of variable length input data and the costs of functions.
type CostEstimator interface {
	// EstimateSize returns a SizeEstimate for the given AstNode, or nil if
	// the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function:
	// length of strings and bytes, number of map entries or number of list items.
	// EstimateSize is only called for AstNodes where
	// CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size
	// is already obvious to CEL.
	// EstimateSize returns a SizeEstimate for the given AstNode, or nil if the estimator has no
	// estimate to provide.
	//
	// The size is equivalent to the result of the CEL `size()` function:
	// * Number of unicode characters in a string
	// * Number of bytes in a sequence
	// * Number of map entries or number of list items.
	//
	// EstimateSize is only called for AstNodes where CEL does not know the size; EstimateSize is not
	// called for values defined inline in CEL where the size is already obvious to CEL.
	EstimateSize(element AstNode) *SizeEstimate
	// EstimateCallCost returns the estimated cost of an invocation, or nil if
	// the estimator has no estimate to provide.

	// EstimateCallCost returns the estimated cost of an invocation, or nil if the estimator has no
	// estimate to provide.
	EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
}

@@ -44,6 +49,7 @@ type CostEstimator interface {
// The ResultSize should only be provided if the call results in a map, list, string or bytes.
type CallEstimate struct {
	CostEstimate

	ResultSize *SizeEstimate
}

@@ -53,10 +59,13 @@ type AstNode interface {
	// represent type directly reachable from the provided type declarations.
	// The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
	Path() []string

	// Type returns the deduced type of the AstNode.
	Type() *types.Type

	// Expr returns the expression of the AstNode.
	Expr() ast.Expr

	// ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
	// For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings
	// and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
@@ -84,36 +93,7 @@ func (e astNode) Expr() ast.Expr {
}

func (e astNode) ComputedSize() *SizeEstimate {
	if e.derivedSize != nil {
		return e.derivedSize
	}
	var v uint64
	switch e.expr.Kind() {
	case ast.LiteralKind:
		switch ck := e.expr.AsLiteral().(type) {
		case types.String:
			// converting to runes here is an O(n) operation, but
			// this is consistent with how size is computed at runtime,
			// and how the language definition defines string size
			v = uint64(len([]rune(ck)))
		case types.Bytes:
			v = uint64(len(ck))
		case types.Bool, types.Double, types.Duration,
			types.Int, types.Timestamp, types.Uint,
			types.Null:
			v = uint64(1)
		default:
			return nil
		}
	case ast.ListKind:
		v = uint64(e.expr.AsList().Size())
	case ast.MapKind:
		v = uint64(e.expr.AsMap().Size())
	default:
		return nil
	}

	return &SizeEstimate{Min: v, Max: v}
	return e.derivedSize
}

// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
@@ -121,6 +101,16 @@ type SizeEstimate struct {
	Min, Max uint64
}

// UnknownSizeEstimate returns a size between 0 and max uint
func UnknownSizeEstimate() SizeEstimate {
	return unknownSizeEstimate
}

// FixedSizeEstimate returns a size estimate with a fixed min and max range.
func FixedSizeEstimate(size uint64) SizeEstimate {
	return SizeEstimate{Min: size, Max: size}
}

// Add adds to another SizeEstimate and returns the sum.
// If add would result in an uint64 overflow, the result is math.MaxUint64.
func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
@@ -175,12 +165,22 @@ type CostEstimate struct {
	Min, Max uint64
}

// UnknownCostEstimate returns a cost with an unknown impact.
func UnknownCostEstimate() CostEstimate {
	return unknownCostEstimate
}

// FixedCostEstimate returns a cost with a fixed min and max range.
func FixedCostEstimate(cost uint64) CostEstimate {
	return CostEstimate{Min: cost, Max: cost}
}

// Add adds the costs and returns the sum.
// If add would result in an uint64 overflow for the min or max, the value is set to math.MaxUint64.
func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
	return CostEstimate{
		addUint64NoOverflow(ce.Min, cost.Min),
		addUint64NoOverflow(ce.Max, cost.Max),
		Min: addUint64NoOverflow(ce.Min, cost.Min),
		Max: addUint64NoOverflow(ce.Max, cost.Max),
	}
}

@@ -188,8 +188,8 @@ func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
// If multiply would result in an uint64 overflow, the result is math.MaxUint64.
func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
	return CostEstimate{
		multiplyUint64NoOverflow(ce.Min, cost.Min),
		multiplyUint64NoOverflow(ce.Max, cost.Max),
		Min: multiplyUint64NoOverflow(ce.Min, cost.Min),
		Max: multiplyUint64NoOverflow(ce.Max, cost.Max),
	}
}

@@ -197,8 +197,8 @@ func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
// nearest integer of the result, rounded up.
func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
	return CostEstimate{
		multiplyByCostFactor(ce.Min, costPerUnit),
		multiplyByCostFactor(ce.Max, costPerUnit),
		Min: multiplyByCostFactor(ce.Min, costPerUnit),
		Max: multiplyByCostFactor(ce.Max, costPerUnit),
	}
}

@@ -245,49 +245,6 @@ func multiplyByCostFactor(x uint64, y float64) uint64 {
	return uint64(ceil)
}

var (
	selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
	constCost          = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}

	createListBaseCost    = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
	createMapBaseCost     = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
	createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
)

type coster struct {
	// exprPath maps from Expr Id to field path.
	exprPath map[int64][]string
	// iterRanges tracks the iterRange of each iterVar.
	iterRanges iterRangeScopes
	// computedSizes tracks the computed sizes of call results.
	computedSizes map[int64]SizeEstimate

	checkedAST         *ast.AST
	estimator          CostEstimator
	overloadEstimators map[string]FunctionEstimator
	// presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
	presenceTestCost CostEstimate
}

// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
type iterRangeScopes map[string][]int64

func (vs iterRangeScopes) push(varName string, expr ast.Expr) {
	vs[varName] = append(vs[varName], expr.ID())
}

func (vs iterRangeScopes) pop(varName string) {
	varStack := vs[varName]
	vs[varName] = varStack[:len(varStack)-1]
}

func (vs iterRangeScopes) peek(varName string) (int64, bool) {
	varStack := vs[varName]
	if len(varStack) > 0 {
		return varStack[len(varStack)-1], true
	}
	return 0, false
}

// CostOption configures flags which affect cost computations.
type CostOption func(*coster) error

@@ -300,7 +257,7 @@ func PresenceTestHasCost(hasCost bool) CostOption {
			c.presenceTestCost = selectAndIdentCost
			return nil
		}
		c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
		c.presenceTestCost = FixedCostEstimate(0)
		return nil
	}
}
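Add, Multiply, and MultiplyByCostFactor above saturate at math.MaxUint64 instead of wrapping on overflow. A sketch of the unexported helpers they delegate to, reconstructed from the documented behavior (the bodies here are illustrative, not copied from cost.go):

import "math"

// addUint64NoOverflow returns math.MaxUint64 when x+y would wrap.
func addUint64NoOverflow(x, y uint64) uint64 {
	if y > 0 && x > math.MaxUint64-y {
		return math.MaxUint64
	}
	return x + y
}

// multiplyUint64NoOverflow returns math.MaxUint64 when x*y would wrap.
func multiplyUint64NoOverflow(x, y uint64) uint64 {
	if y != 0 && x > math.MaxUint64/y {
		return math.MaxUint64
	}
	return x * y
}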
@@ -325,10 +282,11 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs
		checkedAST:         checked,
		estimator:          estimator,
		overloadEstimators: map[string]FunctionEstimator{},
		exprPath:           map[int64][]string{},
		iterRanges:         map[string][]int64{},
		exprPaths:          map[int64][]string{},
		localVars:          make(scopes),
		computedSizes:      map[int64]SizeEstimate{},
		presenceTestCost:   CostEstimate{Min: 1, Max: 1},
		computedEntrySizes: map[int64]entrySizeEstimate{},
		presenceTestCost:   FixedCostEstimate(1),
	}
	for _, opt := range opts {
		err := opt(c)
@@ -339,6 +297,165 @@ func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEs
	return c.cost(checked.Expr()), nil
}

type coster struct {
	// exprPaths maps from Expr Id to field path.
	exprPaths map[int64][]string
	// localVars tracks the local and iteration variables assigned during evaluation.
	localVars scopes
	// computedSizes tracks the computed sizes of call results.
	computedSizes map[int64]SizeEstimate
	// computedEntrySizes tracks the size of list and map entries
	computedEntrySizes map[int64]entrySizeEstimate

	checkedAST         *ast.AST
	estimator          CostEstimator
	overloadEstimators map[string]FunctionEstimator
	// presenceTestCost will either be a zero or one based on whether has() macros count against cost computations.
	presenceTestCost CostEstimate
}

// entrySizeEstimate captures the container kind and associated key/index and value SizeEstimate values.
//
// An entrySizeEstimate only exists if both the key/index and the value have SizeEstimate values, otherwise
// a nil entrySizeEstimate should be used.
type entrySizeEstimate struct {
	containerKind types.Kind
	key           SizeEstimate
	val           SizeEstimate
}

// container returns the container kind (list or map) of the entry.
func (s *entrySizeEstimate) container() types.Kind {
	if s == nil {
		return types.UnknownKind
	}
	return s.containerKind
}

// keySize returns the SizeEstimate for the key if one exists.
func (s *entrySizeEstimate) keySize() *SizeEstimate {
	if s == nil {
		return nil
	}
	return &s.key
}

// valSize returns the SizeEstimate for the value if one exists.
func (s *entrySizeEstimate) valSize() *SizeEstimate {
	if s == nil {
		return nil
	}
	return &s.val
}

func (s *entrySizeEstimate) union(other *entrySizeEstimate) *entrySizeEstimate {
	if s == nil || other == nil {
		return nil
	}
	sk := s.key.Union(other.key)
	sv := s.val.Union(other.val)
	return &entrySizeEstimate{
		containerKind: s.containerKind,
		key:           sk,
		val:           sv,
	}
}

// localVar captures the local variable size and entrySize estimates if they exist for variables
type localVar struct {
	exprID    int64
	path      []string
	size      *SizeEstimate
	entrySize *entrySizeEstimate
}

// scopes is a stack of variable name to integer id stack to handle scopes created by cel.bind() like macros
type scopes map[string][]*localVar

func (s scopes) push(varName string, expr ast.Expr, path []string, size *SizeEstimate, entrySize *entrySizeEstimate) {
	s[varName] = append(s[varName], &localVar{
		exprID:    expr.ID(),
		path:      path,
		size:      size,
		entrySize: entrySize,
	})
}

func (s scopes) pop(varName string) {
	varStack := s[varName]
	s[varName] = varStack[:len(varStack)-1]
}

func (s scopes) peek(varName string) (*localVar, bool) {
	varStack := s[varName]
	if len(varStack) > 0 {
		return varStack[len(varStack)-1], true
	}
	return nil, false
}

func (c *coster) pushIterKey(varName string, rangeExpr ast.Expr) {
	entrySize := c.computeEntrySize(rangeExpr)
	size := entrySize.keySize()
	path := c.getPath(rangeExpr)
	container := entrySize.container()
	if container == types.UnknownKind {
		container = c.getType(rangeExpr).Kind()
	}
	subpath := "@keys"
	if container == types.ListKind {
		subpath = "@indices"
	}
	c.localVars.push(varName, rangeExpr, append(path, subpath), size, nil)
}

func (c *coster) pushIterValue(varName string, rangeExpr ast.Expr) {
	entrySize := c.computeEntrySize(rangeExpr)
	size := entrySize.valSize()
	path := c.getPath(rangeExpr)
	container := entrySize.container()
	if container == types.UnknownKind {
		container = c.getType(rangeExpr).Kind()
	}
	subpath := "@values"
	if container == types.ListKind {
		subpath = "@items"
	}
	c.localVars.push(varName, rangeExpr, append(path, subpath), size, nil)
}

func (c *coster) pushIterSingle(varName string, rangeExpr ast.Expr) {
	entrySize := c.computeEntrySize(rangeExpr)
	size := entrySize.keySize()
	subpath := "@keys"
	container := entrySize.container()
	if container == types.UnknownKind {
		container = c.getType(rangeExpr).Kind()
	}
	if container == types.ListKind {
		size = entrySize.valSize()
		subpath = "@items"
	}
	path := c.getPath(rangeExpr)
	c.localVars.push(varName, rangeExpr, append(path, subpath), size, nil)
}

func (c *coster) pushLocalVar(varName string, e ast.Expr) {
	path := c.getPath(e)
	// note: retrieve the entry size for the local variable based on the size of the binding expression
	// since the binding expression could be a list or map, the entry size should also be propagated
	entrySize := c.computeEntrySize(e)
	c.localVars.push(varName, e, path, c.computeSize(e), entrySize)
}

func (c *coster) peekLocalVar(varName string) (*localVar, bool) {
	return c.localVars.peek(varName)
}

func (c *coster) popLocalVar(varName string) {
	c.localVars.pop(varName)
}

func (c *coster) cost(e ast.Expr) CostEstimate {
	if e == nil {
		return CostEstimate{}
@@ -360,7 +477,11 @@ func (c *coster) cost(e ast.Expr) CostEstimate {
	case ast.StructKind:
		cost = c.costCreateStruct(e)
	case ast.ComprehensionKind:
		cost = c.costComprehension(e)
		if c.isBind(e) {
			cost = c.costBind(e)
		} else {
			cost = c.costComprehension(e)
		}
	default:
		return CostEstimate{}
	}
@@ -370,17 +491,11 @@ func (c *coster) cost(e ast.Expr) CostEstimate {
func (c *coster) costIdent(e ast.Expr) CostEstimate {
	identName := e.AsIdent()
	// build and track the field path
	if iterRange, ok := c.iterRanges.peek(identName); ok {
		switch c.checkedAST.GetType(iterRange).Kind() {
		case types.ListKind:
			c.addPath(e, append(c.exprPath[iterRange], "@items"))
		case types.MapKind:
			c.addPath(e, append(c.exprPath[iterRange], "@keys"))
		}
	if v, ok := c.peekLocalVar(identName); ok {
		c.addPath(e, v.path)
	} else {
		c.addPath(e, []string{identName})
	}

	return selectAndIdentCost
}

@@ -405,14 +520,18 @@ func (c *coster) costSelect(e ast.Expr) CostEstimate {

	// build and track the field path
	c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName()))

	return sum
}

func (c *coster) costCall(e ast.Expr) CostEstimate {
	// Dyn is just a way to disable type-checking, so return the cost of 1 with the cost of the argument
	if dynEstimate := c.maybeUnwrapDynCall(e); dynEstimate != nil {
		return *dynEstimate
	}

	// Continue estimating the cost of all other calls.
	call := e.AsCall()
	args := call.Args()

	var sum CostEstimate

	argTypes := make([]AstNode, len(args))
@@ -435,7 +554,7 @@ func (c *coster) costCall(e ast.Expr) CostEstimate {
	fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0}
	var resultSize *SizeEstimate
	for _, overload := range overloadIDs {
		overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts)
		overloadCost := c.functionCost(e, call.FunctionName(), overload, &targetType, argTypes, argCosts)
		fnCost = fnCost.Union(overloadCost.CostEstimate)
		if overloadCost.ResultSize != nil {
			if resultSize == nil {
@@ -449,37 +568,73 @@ func (c *coster) costCall(e ast.Expr) CostEstimate {
		switch overload {
		case overloads.IndexList:
			if len(args) > 0 {
				// note: assigning resultSize here could be redundant with the path-based lookup later
				resultSize = c.computeEntrySize(args[0]).valSize()
				c.addPath(e, append(c.getPath(args[0]), "@items"))
			}
		case overloads.IndexMap:
			if len(args) > 0 {
				resultSize = c.computeEntrySize(args[0]).valSize()
				c.addPath(e, append(c.getPath(args[0]), "@values"))
			}
		}
		if resultSize == nil {
			resultSize = c.computeSize(e)
		}
	}
	if resultSize != nil {
		c.computedSizes[e.ID()] = *resultSize
	}
	c.setSize(e, resultSize)
	return sum.Add(fnCost)
}

func (c *coster) maybeUnwrapDynCall(e ast.Expr) *CostEstimate {
	call := e.AsCall()
	if call.FunctionName() != "dyn" {
		return nil
	}
	arg := call.Args()[0]
	argCost := c.cost(arg)
	c.copySizeEstimates(e, arg)
	callCost := FixedCostEstimate(1).Add(argCost)
	return &callCost
}

func (c *coster) costCreateList(e ast.Expr) CostEstimate {
	create := e.AsList()
	var sum CostEstimate
	itemSize := SizeEstimate{Min: math.MaxUint64, Max: 0}
	if create.Size() == 0 {
		itemSize.Min = 0
	}
	for _, e := range create.Elements() {
		sum = sum.Add(c.cost(e))
		is := c.sizeOrUnknown(e)
		itemSize = itemSize.Union(is)
	}
	c.setEntrySize(e, &entrySizeEstimate{containerKind: types.ListKind, key: FixedSizeEstimate(1), val: itemSize})
	return sum.Add(createListBaseCost)
}

func (c *coster) costCreateMap(e ast.Expr) CostEstimate {
	mapVal := e.AsMap()
	var sum CostEstimate
	keySize := SizeEstimate{Min: math.MaxUint64, Max: 0}
	valSize := SizeEstimate{Min: math.MaxUint64, Max: 0}
	if mapVal.Size() == 0 {
		valSize.Min = 0
		keySize.Min = 0
	}
	for _, ent := range mapVal.Entries() {
		entry := ent.AsMapEntry()
		sum = sum.Add(c.cost(entry.Key()))
		sum = sum.Add(c.cost(entry.Value()))
		// Compute the key size range
		ks := c.sizeOrUnknown(entry.Key())
		keySize = keySize.Union(ks)
		// Compute the value size range
		vs := c.sizeOrUnknown(entry.Value())
		valSize = valSize.Union(vs)
	}
	c.setEntrySize(e, &entrySizeEstimate{containerKind: types.MapKind, key: keySize, val: valSize})
	return sum.Add(createMapBaseCost)
}

@@ -498,43 +653,76 @@ func (c *coster) costComprehension(e ast.Expr) CostEstimate {
	var sum CostEstimate
	sum = sum.Add(c.cost(comp.IterRange()))
	sum = sum.Add(c.cost(comp.AccuInit()))
	c.pushLocalVar(comp.AccuVar(), comp.AccuInit())

	// Track the iterRange of each IterVar for field path construction
	c.iterRanges.push(comp.IterVar(), comp.IterRange())
	// Track the iterRange of each IterVar and AccuVar for field path construction
	if comp.HasIterVar2() {
		c.pushIterKey(comp.IterVar(), comp.IterRange())
		c.pushIterValue(comp.IterVar2(), comp.IterRange())
	} else {
		c.pushIterSingle(comp.IterVar(), comp.IterRange())
	}

	// Determine the cost for each element in the loop
	loopCost := c.cost(comp.LoopCondition())
	stepCost := c.cost(comp.LoopStep())
	c.iterRanges.pop(comp.IterVar())

	// Clear the intermediate variable tracking.
	c.popLocalVar(comp.IterVar())
	if comp.HasIterVar2() {
		c.popLocalVar(comp.IterVar2())
	}

	// Determine the result cost.
	sum = sum.Add(c.cost(comp.Result()))
	rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange()))

	c.computedSizes[e.ID()] = rangeCnt
	c.localVars.pop(comp.AccuVar())

	// Estimate the cost of the loop.
	rangeCnt := c.sizeOrUnknown(comp.IterRange())
	rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
	sum = sum.Add(rangeCost)

	switch k := comp.AccuInit().Kind(); k {
	case ast.LiteralKind:
		c.setSize(e, c.computeSize(comp.AccuInit()))
	case ast.ListKind, ast.MapKind:
		c.setSize(e, &rangeCnt)
		// For a step which produces a container value, it will have an entry size associated
		// with its expression id.
		if stepEntrySize := c.computeEntrySize(comp.LoopStep()); stepEntrySize != nil {
			c.setEntrySize(e, stepEntrySize)
			break
		}
	}
	return sum
}

func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
	if l := t.ComputedSize(); l != nil {
		return *l
	}
	if l := c.estimator.EstimateSize(t); l != nil {
		return *l
	}
	// return an estimate of 1 for return types of set
	// lengths, since strings/bytes/more complex objects could be of
	// variable length
	if isScalar(t.Type()) {
		// TODO: since the logic for size estimation is split between
		// ComputedSize and isScalar, changing one will likely require changing
		// the other, so they should be merged in the future if possible
		return SizeEstimate{Min: 1, Max: 1}
	}
	return SizeEstimate{Min: 0, Max: math.MaxUint64}
func (c *coster) isBind(e ast.Expr) bool {
	comp := e.AsComprehension()
	iterRange := comp.IterRange()
	loopCond := comp.LoopCondition()
	return iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 &&
		loopCond.Kind() == ast.LiteralKind && loopCond.AsLiteral() == types.False &&
		comp.AccuVar() != parser.AccumulatorName
}

func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
func (c *coster) costBind(e ast.Expr) CostEstimate {
	comp := e.AsComprehension()
	var sum CostEstimate
	// Binds are lazily initialized, so we retain the cost of an empty iteration range.
	sum = sum.Add(c.cost(comp.IterRange()))
	sum = sum.Add(c.cost(comp.AccuInit()))

	c.pushLocalVar(comp.AccuVar(), comp.AccuInit())
	sum = sum.Add(c.cost(comp.Result()))
	c.popLocalVar(comp.AccuVar())

	// Associate the bind output size with the result size.
	c.copySizeEstimates(e, comp.Result())
	return sum
}
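isBind above recognizes the comprehension shape that cel.bind() macros expand to: an empty list as the iteration range, a constant-false loop condition, and a non-default accumulator. costBind then charges only for the init and result, since the loop never runs. A sketch of producing such an AST (assumes the Bindings extension from cel-go/ext):

import (
	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func bindDemo() error {
	env, err := cel.NewEnv(ext.Bindings())
	if err != nil {
		return err
	}
	// Compiles to a comprehension that isBind detects; x is evaluated once.
	_, iss := env.Compile(`cel.bind(x, "hello", x + " " + x)`)
	return iss.Err()
}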
func (c *coster) functionCost(e ast.Expr, function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
|
||||
argCostSum := func() CostEstimate {
|
||||
var sum CostEstimate
|
||||
for _, a := range argCosts {
|
||||
@@ -559,35 +747,42 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
|
||||
case overloads.ExtFormatString:
|
||||
if target != nil {
|
||||
// ResultSize not calculated because we can't bound the max size.
|
||||
return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
|
||||
return CallEstimate{
|
||||
CostEstimate: c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
|
||||
}
|
||||
case overloads.StringToBytes:
|
||||
if len(args) == 1 {
|
||||
sz := c.sizeEstimate(args[0])
|
||||
sz := c.sizeOrUnknown(args[0])
|
||||
// ResultSize max is when each char converts to 4 bytes.
|
||||
return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
|
||||
return CallEstimate{
|
||||
CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
|
||||
ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
|
||||
}
|
||||
case overloads.BytesToString:
|
||||
if len(args) == 1 {
|
||||
sz := c.sizeEstimate(args[0])
|
||||
sz := c.sizeOrUnknown(args[0])
|
||||
// ResultSize min is when 4 bytes convert to 1 char.
|
||||
return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
|
||||
return CallEstimate{
|
||||
CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
|
||||
ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
|
||||
}
|
||||
case overloads.ExtQuoteString:
|
||||
if len(args) == 1 {
|
||||
sz := c.sizeEstimate(args[0])
|
||||
sz := c.sizeOrUnknown(args[0])
|
||||
// ResultSize max is when each char is escaped. 2 quote chars always added.
|
||||
return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
|
||||
return CallEstimate{
|
||||
CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
|
||||
ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
|
||||
}
|
||||
case overloads.StartsWithString, overloads.EndsWithString:
|
||||
if len(args) == 1 {
|
||||
return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
|
||||
return CallEstimate{CostEstimate: c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
|
||||
}
|
||||
case overloads.InList:
|
||||
// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
|
||||
// We just assume all list containment checks are O(n).
|
||||
if len(args) == 2 {
|
||||
return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
|
||||
return CallEstimate{CostEstimate: c.sizeOrUnknown(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
|
||||
}
|
||||
// O(nm) functions
|
||||
case overloads.MatchesString:
|
||||
@@ -595,19 +790,19 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
|
||||
if target != nil && len(args) == 1 {
|
||||
// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
|
||||
// in case where string is empty but regex is still expensive.
|
||||
strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
|
||||
strCost := c.sizeOrUnknown(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
|
||||
// We don't know how many expressions are in the regex, just the string length (a huge
|
||||
// improvement here would be to somehow get a count the number of expressions in the regex or
|
||||
// how many states are in the regex state machine and use that to measure regex cost).
|
||||
// For now, we're making a guess that each expression in a regex is typically at least 4 chars
|
||||
// in length.
|
||||
regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
|
||||
regexCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
|
||||
return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())}
|
||||
}
|
||||
case overloads.ContainsString:
|
||||
if target != nil && len(args) == 1 {
|
||||
strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
|
||||
substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
|
||||
strCost := c.sizeOrUnknown(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)
|
||||
            substrCost := c.sizeOrUnknown(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor)
            return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())}
        }
    case overloads.LogicalOr, overloads.LogicalAnd:
@@ -617,7 +812,9 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
        argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max}
        return CallEstimate{CostEstimate: argCost}
    case overloads.Conditional:
        size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2]))
        size := c.sizeOrUnknown(args[1]).Union(c.sizeOrUnknown(args[2]))
        resultEntrySize := c.computeEntrySize(args[1].Expr()).union(c.computeEntrySize(args[2].Expr()))
        c.setEntrySize(e, resultEntrySize)
        conditionalCost := argCosts[0]
        ifTrueCost := argCosts[1]
        ifFalseCost := argCosts[2]
@@ -625,13 +822,19 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
        return CallEstimate{CostEstimate: argCost, ResultSize: &size}
    case overloads.AddString, overloads.AddBytes, overloads.AddList:
        if len(args) == 2 {
            lhsSize := c.sizeEstimate(args[0])
            rhsSize := c.sizeEstimate(args[1])
            lhsSize := c.sizeOrUnknown(args[0])
            rhsSize := c.sizeOrUnknown(args[1])
            resultSize := lhsSize.Add(rhsSize)
            rhsEntrySize := c.computeEntrySize(args[0].Expr())
            lhsEntrySize := c.computeEntrySize(args[1].Expr())
            resultEntrySize := rhsEntrySize.union(lhsEntrySize)
            if resultEntrySize != nil {
                c.setEntrySize(e, resultEntrySize)
            }
            switch overloadID {
            case overloads.AddList:
                // list concatenation is O(1), but we handle it here to track size
                return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize}
                return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum()), ResultSize: &resultSize}
            default:
                return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize}
            }
@@ -639,8 +842,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
    case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
        overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
        overloads.Equals, overloads.NotEquals:
        lhsCost := c.sizeEstimate(args[0])
        rhsCost := c.sizeEstimate(args[1])
        lhsCost := c.sizeOrUnknown(args[0])
        rhsCost := c.sizeOrUnknown(args[1])
        min := uint64(0)
        smallestMax := lhsCost.Max
        if rhsCost.Max < smallestMax {
@@ -650,14 +853,16 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args
            min = 1
        }
        // equality of 2 scalar values results in a cost of 1
        return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
        return CallEstimate{
            CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()),
        }
    }
    // O(1) functions
    // See CostTracker.costCall for more details about O(1) cost calculations

    // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit
    // which on an Intel xeon 2.20GHz CPU is 50ns.
    return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())}
    return CallEstimate{CostEstimate: FixedCostEstimate(1).Add(argCostSum())}
}

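Aside: the comparison branch above bounds the cost of equality and ordering on strings by a traversal of the shorter operand, scaled by the string-traversal cost factor and rounded up. A minimal standalone sketch of that bound; the helper name and the 0.1 value of common.StringTraversalCostFactor are assumptions for illustration, not the cel-go API:

package main

import (
    "fmt"
    "math"
)

// ceilFactor mimics MultiplyByCostFactor's rounding: multiply, then round up,
// so any nonzero cost stays nonzero. (Illustrative helper, assumed behavior.)
func ceilFactor(v uint64, factor float64) uint64 {
    return uint64(math.Ceil(float64(v) * factor))
}

func main() {
    const stringTraversalCostFactor = 0.1 // assumed value of common.StringTraversalCostFactor
    lhsMax, rhsMax := uint64(100), uint64(10)
    smallestMax := lhsMax
    if rhsMax < smallestMax {
        smallestMax = rhsMax
    }
    // Comparing two strings costs at most a traversal of the shorter one;
    // two known-nonempty operands cost at least 1.
    fmt.Println(ceilFactor(1, stringTraversalCostFactor))           // min cost: 1
    fmt.Println(ceilFactor(smallestMax, stringTraversalCostFactor)) // max cost: 1
}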
func (c *coster) getType(e ast.Expr) *types.Type {
@@ -665,28 +870,145 @@ func (c *coster) getType(e ast.Expr) *types.Type {
}

func (c *coster) getPath(e ast.Expr) []string {
    return c.exprPath[e.ID()]
    if e.Kind() == ast.IdentKind {
        if v, found := c.peekLocalVar(e.AsIdent()); found {
            return v.path[:]
        }
    }
    return c.exprPaths[e.ID()][:]
}

func (c *coster) addPath(e ast.Expr, path []string) {
    c.exprPath[e.ID()] = path
    c.exprPaths[e.ID()] = path
}

func isAccumulatorVar(name string) bool {
    return name == parser.AccumulatorName || name == parser.HiddenAccumulatorName
}

func (c *coster) newAstNode(e ast.Expr) *astNode {
    path := c.getPath(e)
    if len(path) > 0 && path[0] == parser.AccumulatorName {
    if len(path) > 0 && isAccumulatorVar(path[0]) {
        // only provide paths to root vars; omit accumulator vars
        path = nil
    }
    var derivedSize *SizeEstimate
    if size, ok := c.computedSizes[e.ID()]; ok {
        derivedSize = &size
    }
    return &astNode{
        path: path,
        t: c.getType(e),
        expr: e,
        derivedSize: derivedSize}
        derivedSize: c.computeSize(e)}
}

func (c *coster) setSize(e ast.Expr, size *SizeEstimate) {
    if size == nil {
        return
    }
    // Store the computed size with the expression
    c.computedSizes[e.ID()] = *size
}

func (c *coster) sizeOrUnknown(node any) SizeEstimate {
    switch v := node.(type) {
    case ast.Expr:
        if sz := c.computeSize(v); sz != nil {
            return *sz
        }
    case AstNode:
        if sz := v.ComputedSize(); sz != nil {
            return *sz
        }
    }
    return UnknownSizeEstimate()
}

func (c *coster) copySizeEstimates(dst, src ast.Expr) {
    c.setSize(dst, c.computeSize(src))
    c.setEntrySize(dst, c.computeEntrySize(src))
}

func (c *coster) computeSize(e ast.Expr) *SizeEstimate {
    if size, ok := c.computedSizes[e.ID()]; ok {
        return &size
    }
    if size := computeExprSize(e); size != nil {
        return size
    }
    // Ensure size estimates are computed first as users may choose to override the costs that
    // CEL would otherwise ascribe to the type.
    node := astNode{expr: e, path: c.getPath(e), t: c.getType(e)}
    if size := c.estimator.EstimateSize(node); size != nil {
        // storing the computed size should reduce calls to EstimateSize()
        c.computedSizes[e.ID()] = *size
        return size
    }
    if size := computeTypeSize(c.getType(e)); size != nil {
        return size
    }
    if e.Kind() == ast.IdentKind {
        varName := e.AsIdent()
        if v, ok := c.peekLocalVar(varName); ok && v.size != nil {
            return v.size
        }
    }
    return nil
}

func (c *coster) setEntrySize(e ast.Expr, size *entrySizeEstimate) {
    if size == nil {
        return
    }
    c.computedEntrySizes[e.ID()] = *size
}

func (c *coster) computeEntrySize(e ast.Expr) *entrySizeEstimate {
    if sz, found := c.computedEntrySizes[e.ID()]; found {
        return &sz
    }
    if e.Kind() == ast.IdentKind {
        varName := e.AsIdent()
        if v, ok := c.peekLocalVar(varName); ok && v.entrySize != nil {
            return v.entrySize
        }
    }
    return nil
}

func computeExprSize(expr ast.Expr) *SizeEstimate {
    var v uint64
    switch expr.Kind() {
    case ast.LiteralKind:
        switch ck := expr.AsLiteral().(type) {
        case types.String:
            // converting to runes here is an O(n) operation, but
            // this is consistent with how size is computed at runtime,
            // and how the language definition defines string size
            v = uint64(len([]rune(ck)))
        case types.Bytes:
            v = uint64(len(ck))
        case types.Bool, types.Double, types.Duration,
            types.Int, types.Timestamp, types.Uint,
            types.Null:
            v = uint64(1)
        default:
            return nil
        }
    case ast.ListKind:
        v = uint64(expr.AsList().Size())
    case ast.MapKind:
        v = uint64(expr.AsMap().Size())
    default:
        return nil
    }
    cost := FixedSizeEstimate(v)
    return &cost
}

func computeTypeSize(t *types.Type) *SizeEstimate {
    if isScalar(t) {
        cost := FixedSizeEstimate(1)
        return &cost
    }
    return nil
}

// isScalar returns true if the given type is known to be of a constant size at
@@ -696,10 +1018,24 @@ func isScalar(t *types.Type) bool {
    switch t.Kind() {
    case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind:
        return true
    case types.OpaqueKind:
        if t.TypeName() == "optional_type" {
            return isScalar(t.Parameters()[0])
        }
    }
    return false
}

var (
    doubleTwoTo64 = math.Ldexp(1.0, 64)

    unknownSizeEstimate = SizeEstimate{Min: 0, Max: math.MaxUint64}
    unknownCostEstimate = unknownSizeEstimate.MultiplyByCostFactor(1)

    selectAndIdentCost = FixedCostEstimate(common.SelectAndIdentCost)
    constCost          = FixedCostEstimate(common.ConstCost)

    createListBaseCost    = FixedCostEstimate(common.ListCreateBaseCost)
    createMapBaseCost     = FixedCostEstimate(common.MapCreateBaseCost)
    createMessageBaseCost = FixedCostEstimate(common.StructCreateBaseCost)
)

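Aside: computeExprSize above counts string size in runes rather than bytes, since CEL defines size() on strings as the number of Unicode code points; the O(n) rune conversion keeps the check-time estimate consistent with the runtime value. A minimal standalone sketch of the distinction (plain Go, no cel-go dependency):

package main

import "fmt"

func main() {
    s := "héllo" // 6 bytes of UTF-8, 5 code points
    fmt.Println(len(s))         // 6: byte length
    fmt.Println(len([]rune(s))) // 5: code-point count, the value CEL's size() reports
}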
29
vendor/github.com/google/cel-go/common/ast/factory.go
generated
vendored
@@ -40,15 +40,18 @@ type ExprFactory interface {
    NewIdent(id int64, name string) Expr

    // NewAccuIdent creates an Expr value representing an accumulator identifier within a
    //comprehension.
    // comprehension.
    NewAccuIdent(id int64) Expr

    // AccuIdentName reports the name of the accumulator variable to be used within a comprehension.
    AccuIdentName() string

    // NewLiteral creates an Expr value representing a literal value, such as a string or integer.
    NewLiteral(id int64, value ref.Val) Expr

    // NewList creates an Expr value representing a list literal expression with optional indices.
    //
    // Optional indicies will typically be empty unless the CEL optional types are enabled.
    // Optional indices will typically be empty unless the CEL optional types are enabled.
    NewList(id int64, elems []Expr, optIndices []int32) Expr

    // NewMap creates an Expr value representing a map literal expression
@@ -78,11 +81,23 @@ type ExprFactory interface {
    isExprFactory()
}

type baseExprFactory struct{}
type baseExprFactory struct {
    accumulatorName string
}

// NewExprFactory creates an ExprFactory instance.
func NewExprFactory() ExprFactory {
    return &baseExprFactory{}
    return &baseExprFactory{
        "@result",
    }
}

// NewExprFactoryWithAccumulator creates an ExprFactory instance with a custom
// accumulator identifier name.
func NewExprFactoryWithAccumulator(id string) ExprFactory {
    return &baseExprFactory{
        id,
    }
}

func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr {
@@ -138,7 +153,11 @@ func (fac *baseExprFactory) NewIdent(id int64, name string) Expr {
}

func (fac *baseExprFactory) NewAccuIdent(id int64) Expr {
    return fac.NewIdent(id, "__result__")
    return fac.NewIdent(id, fac.AccuIdentName())
}

func (fac *baseExprFactory) AccuIdentName() string {
    return fac.accumulatorName
}

func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr {
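Aside: the factory diff above replaces the hard-coded "__result__" accumulator identifier with a configurable name defaulting to "@result". A minimal usage sketch, assuming the patched cel-go ast package is vendored as shown; the id argument is an arbitrary placeholder:

package main

import (
    "fmt"

    "github.com/google/cel-go/common/ast"
)

func main() {
    def := ast.NewExprFactory()
    fmt.Println(def.AccuIdentName()) // "@result", the new default

    fac := ast.NewExprFactoryWithAccumulator("__custom_accu__")
    accu := fac.NewAccuIdent(1) // ident expression named "__custom_accu__"
    fmt.Println(accu.AsIdent())
}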
Some files were not shown because too many files have changed in this diff.