mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 13:29:11 +01:00

Compare commits


6 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
839c506c3c Merge pull request #1108 from a7i/amir/helm-1.26.1
Update helm chart to v0.26.1
2023-04-04 10:33:45 -07:00
Amir Alavi
9dbf1af06f Update helm chart to v0.26.1 2023-04-03 15:01:04 -04:00
Kubernetes Prow Robot
bfe1885eb2 Merge pull request #1110 from a7i/update-1.26.1-refs
update v0.26.1 references
2023-04-03 08:39:52 -07:00
Amir Alavi
b1be089175 update v0.26.1 references 2023-04-03 11:25:55 -04:00
Kubernetes Prow Robot
5f1b31fcfc Merge pull request #1106 from a7i/release-1.26-patch
fix default value assignment of EvictLocalStoragePods
2023-04-03 04:17:52 -07:00
yanggang
bd9dea9979 fix if condition for the right value of EvictLocalStoragePods
Signed-off-by: yanggang <gang.yang@daocloud.io>
2023-04-02 20:20:36 -04:00
6254 changed files with 237153 additions and 719449 deletions

.gitattributes

@@ -1,6 +0,0 @@
# Always check-out / check-in files with LF line endings.
* text=auto eol=lf
go.sum merge=union
**/zz_generated.*.go linguist-generated=true
vendor/** linguist-vendored


@@ -1,22 +0,0 @@
## Description
<!-- Please include a summary of the change and which issue is fixed -->
## Checklist
Please ensure your pull request meets the following criteria before submitting it for review; reviewers will use these items to assess the quality and completeness of your changes:
- [ ] **Code Readability**: Is the code easy to understand, well-structured, and consistent with project conventions?
- [ ] **Naming Conventions**: Are variable, function, and struct names descriptive and consistent?
- [ ] **Code Duplication**: Is there any repeated code that should be refactored?
- [ ] **Function/Method Size**: Are functions/methods short and focused on a single task?
- [ ] **Comments & Documentation**: Are comments clear, useful, and not excessive? Were comments updated where necessary?
- [ ] **Error Handling**: Are errors handled appropriately?
- [ ] **Testing**: Are there sufficient unit/integration tests?
- [ ] **Performance**: Are there any obvious performance issues or unnecessary computations?
- [ ] **Dependencies**: Are new dependencies justified?
- [ ] **Logging & Monitoring**: Is logging used appropriately (not too verbose, not too silent)?
- [ ] **Backward Compatibility**: Does this change break any existing functionality or APIs?
- [ ] **Resource Management**: Are resources (files, connections, memory) managed and released properly?
- [ ] **PR Description**: Is the PR description clear, providing enough context and explaining the motivation for the change?
- [ ] **Documentation & Changelog**: Are README and docs updated if necessary?


@@ -20,42 +20,34 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v4.2.0
uses: azure/setup-helm@v2.1
with:
version: v3.15.1
version: v3.9.2
- uses: actions/setup-python@v5.1.1
- uses: actions/setup-python@v3.1.2
with:
python-version: 3.12
python-version: 3.7
- uses: actions/setup-go@v5
- uses: actions/setup-go@v3
with:
go-version-file: 'go.mod'
go-version: '1.19.3'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
uses: helm/chart-testing-action@v2.2.1
with:
version: v3.11.0
- name: Install Helm Unit Test Plugin
run: |
helm plugin install https://github.com/helm-unittest/helm-unittest
- name: Run Helm Unit Tests
run: |
helm unittest charts/descheduler --strict -d
version: v3.7.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
@@ -68,3 +60,10 @@ jobs:
# helm-extra-set-args only available after ct 3.6.0
- name: Run chart-testing (install)
run: ct install --config=.github/ci/ct.yaml --helm-extra-set-args='--set=kind=Deployment'
- name: E2E after chart install
env:
KUBERNETES_VERSION: "v1.26.0"
KIND_E2E: true
SKIP_INSTALL: true
run: make test-e2e


@@ -1,44 +0,0 @@
name: manifests
on:
pull_request:
jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.34.0"]
descheduler-version: ["v0.34.0"]
descheduler-api: ["v1alpha2"]
manifest: ["deployment"]
kind-version: ["v0.30.0"] # keep in sync with test/run-e2e-tests.sh
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Create kind cluster
uses: helm/kind-action@v1.12.0
with:
node_image: kindest/node:${{ matrix.k8s-version }}
kubectl_version: ${{ matrix.k8s-version }}
config: test/kind-config.yaml
version: ${{ matrix.kind-version }}
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: Build image
run: |
VERSION="dev" make dev-image
docker tag descheduler:dev registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }}
- name: Kind load image
run: |
kind load docker-image registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }} --name chart-testing
- name: Create k8s manifests
run: |
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f test/manifests/${{ matrix.descheduler-api }}/configmap.yaml
kubectl create -f kubernetes/${{ matrix.manifest }}/${{ matrix.manifest }}.yaml
- name: Wait for ready condition
run: |
kubectl wait --for=condition=Available --timeout=60s ${{ matrix.manifest }} descheduler -n kube-system


@@ -5,9 +5,6 @@ on:
branches:
- release-*
permissions:
contents: write # allow actions to update gh-pages branch
jobs:
release:
runs-on: ubuntu-latest
@@ -23,12 +20,12 @@ jobs:
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v4.2.0
uses: azure/setup-helm@v1
with:
version: v3.15.1
version: v3.4.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
uses: helm/chart-releaser-action@v1.1.0
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"


@@ -22,7 +22,7 @@ jobs:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0


@@ -1,5 +1,5 @@
run:
timeout: 5m
deadline: 2m
linters:
disable-all: true
@@ -15,4 +15,4 @@ linters-settings:
gofumpt:
extra-rules: true
goimports:
local-prefixes: sigs.k8s.io/descheduler
local-prefixes: sigs.k8s.io/descheduler


@@ -1,30 +0,0 @@
# Descheduler Design Constraints
This is a slowly growing document that lists good practices, conventions, and design decisions.
## Overview
TBD
## Code convention
* *formatting code*: run `make fmt` before committing each change to avoid CI failures
## Unit Test Conventions
These are the known conventions that are useful to practice whenever reasonable:
* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
* *no object instance duplication*: avoid duplication by not creating two objects with the same passed values in two different places, e.g. two nodes created with the same memory, CPU, and pod requests. Instead, create a single function wrapping `test.BuildTestNode` and invoke that wrapper multiple times (see the sketch below).
The aim is to reduce cognitive load when reading and debugging the test code.
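
A minimal Go sketch of the *single node creation* and *no object instance duplication* conventions; the wrapper name and the resource values are illustrative, and the `test.BuildTestNode` signature is assumed from the descheduler `test` package:

```go
package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/test"
)

// buildSmallNode wraps test.BuildTestNode so tests that need a "small"
// node call one helper instead of repeating the same millicpu/memory/pods
// values in several places.
func buildSmallNode(name string, apply func(*v1.Node)) *v1.Node {
	return test.BuildTestNode(name, 1000, 2000, 9, func(node *v1.Node) {
		node.Labels = map[string]string{"type": "small"}
		if apply != nil {
			apply(node) // per-test tweaks still go through the apply argument
		}
	})
}
```

Each test builds fresh instances, e.g. `buildSmallNode("n1", nil)` and `buildSmallNode("n2", nil)`, which also satisfies the *no object instance sharing* convention.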
## Design Decisions FAQ
This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.
### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?
In general, each plugin can have many indexers—for example, for nodes, namespaces, pods, and other resources. Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.
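
A minimal sketch of the profile-prefixed indexer idea described above, assuming a hypothetical handle that exposes a profile-unique ID and a pod informer; `cache.Indexers` and `AddIndexers` are real client-go API, everything else is illustrative:

```go
package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// handle is a stand-in for the framework handle discussed above; the
// ProfileID and PodInformer accessors are assumptions, not existing API.
type handle interface {
	ProfileID() string
	PodInformer() cache.SharedIndexInformer
}

// registerPodsByNodeIndexer names the indexer with a profile-unique prefix,
// so two instances of the same profile never collide on the indexer name.
func registerPodsByNodeIndexer(h handle) (string, error) {
	name := fmt.Sprintf("%s/podsByNode", h.ProfileID())
	err := h.PodInformer().AddIndexers(cache.Indexers{
		name: func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok {
				return nil, fmt.Errorf("expected *v1.Pod, got %T", obj)
			}
			return []string{pod.Spec.NodeName}, nil
		},
	})
	return name, err
}
```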


@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.24.6
FROM golang:1.19.3
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -21,9 +21,7 @@ RUN VERSION=${VERSION} make build.$ARCH
FROM scratch
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
USER 1000


@@ -13,7 +13,7 @@
# limitations under the License.
FROM scratch
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>
USER 1000


@@ -26,14 +26,12 @@ ARCHS = amd64 arm arm64
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
GOLANGCI_VERSION := v1.64.8
GOLANGCI_VERSION := v1.49.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
GOFUMPT_VERSION := v0.7.0
GOFUMPT_VERSION := v0.4.0
HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
GO_VERSION := $(shell (command -v jq > /dev/null && (go mod edit -json | jq -r .Go)) || (sed -En 's/^go (.*)$$/\1/p' go.mod))
# REGISTRY is the container registry to push
# into. The default is to push to the staging
# registry, not production.
@@ -45,10 +43,6 @@ IMAGE:=descheduler:$(VERSION)
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
# CURRENT_DIR is the current dir where the Makefile exists
CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
@@ -106,7 +100,7 @@ clean:
rm -rf _output
rm -rf _tmp
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-gen
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen
verify-govet:
./hack/verify-govet.sh
@@ -120,8 +114,8 @@ verify-gofmt:
verify-vendor:
./hack/verify-vendor.sh
verify-docs:
./hack/verify-docs.sh
verify-toc:
./hack/verify-toc.sh
test-unit:
./test/run-unit-tests.sh
@@ -133,22 +127,18 @@ gen:
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
./hack/update-docs.sh
gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:$(GO_VERSION) gen
./hack/update-toc.sh
verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh
./hack/verify-defaulters.sh
./hack/verify-docs.sh
lint:
ifndef HAS_GOLANGCI
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
./_output/bin/golangci-lint run -v
./_output/bin/golangci-lint run
fmt:
ifndef HAS_GOFUMPT

OWNERS

@@ -3,8 +3,6 @@ approvers:
- ingvagabund
- seanmalloy
- a7i
- knelasevero
- ricardomaraschini
reviewers:
- damemi
- seanmalloy
@@ -13,9 +11,6 @@ reviewers:
- a7i
- janeliul
- knelasevero
- jklaw90
- googs1025
- ricardomaraschini
emeritus_approvers:
- aveshagarwal
- k82cn

README.md

File diff suppressed because it is too large

@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.34.0
appVersion: 0.34.0
version: 0.26.1
appVersion: 0.26.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
@@ -13,4 +13,4 @@ sources:
- https://github.com/kubernetes-sigs/descheduler
maintainers:
- name: Kubernetes SIG Scheduling
email: sig-scheduling@kubernetes.io
email: kubernetes-sig-scheduling@googlegroups.com


@@ -11,7 +11,7 @@ helm install my-release --namespace kube-system descheduler/descheduler
## Introduction
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview the changes descheduler would make without applying them, install descheduler in dry-run mode by passing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
@@ -52,11 +52,9 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
@@ -64,23 +62,19 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use; if not set and `create` is true, a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `cronJobAnnotations` | Annotations to add to the descheduler CronJob | `{}` |
| `cronJobLabels` | Labels to add to the descheduler CronJob | `{}` |
| `jobAnnotations` | Annotations to add to the descheduler Job resources (created by CronJob) | `{}` |
| `jobLabels` | Labels to add to the descheduler Job resources (created by CronJob) | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keep the scraped data's labels when they collide with target labels | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
@@ -88,7 +82,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
| `tolerations` | Tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |


@@ -1,7 +1,7 @@
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq (.Values.replicas | int) 1 }}
{{- if eq .Values.replicas 1.0}}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
@@ -10,13 +10,3 @@ WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}
{{- if .Values.deschedulerPolicy }}
A DeschedulerPolicy has been applied for you. You can view the policy with:
kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml
If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.
deschedulerPolicy: {}
{{- end }}


@@ -24,14 +24,6 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{- end -}}
{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
@@ -95,10 +87,8 @@ Leader Election
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- if .Values.leaderElection.resourceNamescape }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
{{- end -}}
{{- end }}
{{- end }}


@@ -24,9 +24,6 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
@@ -36,13 +33,4 @@ rules:
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- end }}
{{- if and .Values.deschedulerPolicy }}
{{- range .Values.deschedulerPolicy.metricsProviders }}
{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
{{- end }}
{{- end }}
{{- end }}
{{- end -}}


@@ -12,5 +12,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}


@@ -1,14 +1,12 @@
{{- if .Values.deschedulerPolicy }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
{{ tpl (toYaml .Values.deschedulerPolicy) . | trim | indent 4 }}
{{- end }}
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}


@@ -3,16 +3,9 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
{{- if .Values.cronJobAnnotations }}
annotations:
{{- .Values.cronJobAnnotations | toYaml | nindent 4 }}
{{- end }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.cronJobLabels }}
{{- .Values.cronJobLabels | toYaml | nindent 4 }}
{{- end }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
@@ -22,34 +15,17 @@ spec:
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }}
{{- if ne .Values.successfulJobsHistoryLimit nil }}
{{- if .Values.successfulJobsHistoryLimit }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }}
{{- if ne .Values.failedJobsHistoryLimit nil }}
{{- if .Values.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
{{- if or .Values.jobAnnotations .Values.jobLabels }}
metadata:
{{- if .Values.jobAnnotations }}
annotations:
{{- .Values.jobAnnotations | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.jobLabels }}
labels:
{{- .Values.jobLabels | toYaml | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
{{- if .Values.activeDeadlineSeconds }}
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
@@ -72,14 +48,6 @@ spec:
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
@@ -88,9 +56,6 @@ spec:
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@@ -101,41 +66,33 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 16 }}
- "/bin/descheduler"
args:
- --policy-config-file=/policy-dir/policy.yaml
- "--policy-config-file"
- "/policy-dir/policy.yaml"
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
ports:
{{- toYaml .Values.ports | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 16 }}
{{- end }}
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 16 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 10 }}
{{- end }}
{{- end }}


@@ -3,14 +3,11 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.annotations }}
annotations: {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
{{- end }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if gt .Values.replicas 1.0}}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
@@ -34,67 +31,54 @@ spec:
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 12 }}
- "/bin/descheduler"
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.leaderElection.enabled }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
{{- end }}
ports:
{{- toYaml .Values.ports | nindent 12 }}
- containerPort: 10258
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 12 }}
{{- end }}
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
{{ toYaml .Values.extraServiceAccountVolumes | nindent 8}}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
@@ -103,10 +87,6 @@ spec:
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}


@@ -6,15 +6,9 @@ metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
{{- end }}
ports:
- name: http-metrics
port: 10258


@@ -1,12 +1,9 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
{{- if kindIs "bool" .Values.serviceAccount.automountServiceAccountToken }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
{{- end }}
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}


@@ -7,14 +7,11 @@ metadata:
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ include "descheduler.namespace" . }}
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}


@@ -1 +0,0 @@
__snapshot__


@@ -1,109 +0,0 @@
suite: Test Descheduler CronJob and Job Annotations and Labels
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: CronJob
tests:
- it: adds cronJob and job annotations and labels when set
template: templates/cronjob.yaml
set:
cronJobAnnotations:
monitoring.company.com/scrape: "true"
description: "test cronjob"
cronJobLabels:
environment: "test"
team: "platform"
jobAnnotations:
sidecar.istio.io/inject: "false"
job.company.com/retry-limit: "3"
jobLabels:
job-type: "maintenance"
priority: "high"
asserts:
- equal:
path: metadata.annotations["monitoring.company.com/scrape"]
value: "true"
- equal:
path: metadata.annotations.description
value: "test cronjob"
- equal:
path: metadata.labels.environment
value: "test"
- equal:
path: metadata.labels.team
value: "platform"
- equal:
path: spec.jobTemplate.metadata.annotations["sidecar.istio.io/inject"]
value: "false"
- equal:
path: spec.jobTemplate.metadata.annotations["job.company.com/retry-limit"]
value: "3"
- equal:
path: spec.jobTemplate.metadata.labels.job-type
value: "maintenance"
- equal:
path: spec.jobTemplate.metadata.labels.priority
value: "high"
- it: does not add cronJob and job metadata when not set
template: templates/cronjob.yaml
asserts:
- isNull:
path: metadata.annotations
- isNotNull:
path: metadata.labels
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: descheduler
- isNull:
path: spec.jobTemplate.metadata
- it: does not add job metadata when job annotations and labels are empty
template: templates/cronjob.yaml
set:
jobAnnotations: {}
jobLabels: {}
asserts:
- isNull:
path: spec.jobTemplate.metadata
- it: works with all annotation and label types together
template: templates/cronjob.yaml
set:
cronJobAnnotations:
cron-annotation: "cron-value"
cronJobLabels:
cron-label: "cron-value"
jobAnnotations:
job-annotation: "job-value"
jobLabels:
job-label: "job-value"
podAnnotations:
pod-annotation: "pod-value"
podLabels:
pod-label: "pod-value"
asserts:
- equal:
path: metadata.annotations.cron-annotation
value: "cron-value"
- equal:
path: metadata.labels.cron-label
value: "cron-value"
- equal:
path: spec.jobTemplate.metadata.annotations.job-annotation
value: "job-value"
- equal:
path: spec.jobTemplate.metadata.labels.job-label
value: "job-value"
- equal:
path: spec.jobTemplate.spec.template.metadata.annotations.pod-annotation
value: "pod-value"
- equal:
path: spec.jobTemplate.spec.template.metadata.labels.pod-label
value: "pod-value"


@@ -1,17 +0,0 @@
suite: Test Descheduler CronJob
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: CronJob
tests:
- it: creates CronJob when kind is set
template: templates/cronjob.yaml
asserts:
- isKind:
of: CronJob


@@ -1,49 +0,0 @@
suite: Test Descheduler Deployment
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: Deployment
tests:
- it: creates Deployment when kind is set
template: templates/deployment.yaml
asserts:
- isKind:
of: Deployment
- it: enables leader-election
set:
leaderElection:
enabled: true
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect=true
- it: support leader-election resourceNamespace
set:
leaderElection:
enabled: true
resourceNamespace: test
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=test
- it: support legacy leader-election resourceNamescape
set:
leaderElection:
enabled: true
resourceNamescape: typo
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=typo


@@ -18,34 +18,13 @@ resources:
requests:
cpu: 500m
memory: 256Mi
limits:
cpu: 500m
memory: 256Mi
ports:
- containerPort: 10258
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000
# limits:
# cpu: 100m
# memory: 128Mi
nameOverride: ""
fullnameOverride: ""
# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
@@ -55,9 +34,7 @@ suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished: 600
# activeDeadlineSeconds: 60 # Make sure this value is SHORTER than the cron interval.
# timeZone: Etc/UTC
# ttlSecondsAfterFinished 600
# Required when running as a Deployment
deschedulingInterval: 5m
@@ -78,75 +55,51 @@ leaderElection: {}
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamespace: "kube-system"
command:
- "/bin/descheduler"
# resourceNamescape: "kube-system"
cmdOptions:
v: 3
# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
# deschedulerPolicy contains the policies the descheduler will execute.
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# metricsProviders:
# - source: KubernetesMetrics
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
profiles:
- name: default
pluginConfig:
- name: DefaultEvictor
args:
podProtections:
defaultDisabled:
- "PodsWithLocalStorage"
extraEnabled:
- "PodsWithPVC"
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
podRestartThreshold: 100
includingInitContainers: true
- name: RemovePodsViolatingNodeAffinity
args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: LowNodeUtilization
args:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsHavingTooManyRestarts
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity
# ignorePvcPods: true
# evictLocalStoragePods: true
strategies:
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
LowNodeUtilization:
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50
priorityClassName: system-cluster-critical
@@ -172,13 +125,6 @@ affinity: {}
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
# operator: 'Equal'
@@ -197,62 +143,27 @@ serviceAccount:
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
# Opt out of API credential automounting
#
# automountServiceAccountToken Default is not set
# automountServiceAccountToken: true
# Mount the ServiceAccountToken in the Pod of a CronJob or Deployment
# Default is not set - but only implied by the ServiceAccount
# automountServiceAccountToken: true
# Annotations that'll be applied to deployment
deploymentAnnotations: {}
cronJobAnnotations: {}
cronJobLabels: {}
jobAnnotations: {}
jobLabels: {}
podAnnotations: {}
podLabels: {}
dnsConfig: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 5
periodSeconds: 20
timeoutSeconds: 5
initialDelaySeconds: 3
periodSeconds: 10
service:
enabled: false
# @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
#
ipFamilyPolicy: ""
# @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
# E.g.
# ipFamilies:
# - IPv6
# - IPv4
ipFamilies: []
serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true
@@ -268,30 +179,3 @@ serviceMonitor:
# targetLabel: nodename
# replacement: $1
# action: replace
## Additional Volume mounts when automountServiceAccountToken is false
# extraServiceAccountVolumeMounts:
# - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
# name: kube-api-access
# readOnly: true
## Additional Volumes when automountServiceAccountToken is false
# extraServiceAccountVolumes:
# - name: kube-api-access
# projected:
# defaultMode: 0444
# sources:
# - configMap:
# items:
# - key: ca.crt
# path: ca.crt
# name: kube-root-ca.crt
# - downwardAPI:
# items:
# - fieldRef:
# apiVersion: v1
# fieldPath: metadata.namespace
# path: namespace
# - serviceAccountToken:
# expirationSeconds: 3600
# path: token


@@ -18,30 +18,17 @@ limitations under the License.
package options
import (
"strings"
"time"
promapi "github.com/prometheus/client_golang/api"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserver "k8s.io/apiserver/pkg/server"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
cliflag "k8s.io/component-base/cli/flag"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/tracing"
)
const (
@@ -52,18 +39,10 @@ const (
type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration
Client clientset.Interface
EventClient clientset.Interface
MetricsClient metricsclient.Interface
PrometheusClient promapi.Client
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
SecureServingInfo *apiserver.SecureServingInfo
DisableMetrics bool
EnableHTTP2 bool
// FeatureGates enabled by the user
FeatureGates map[string]bool
// DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
DefaultFeatureGates featuregate.FeatureGate
Client clientset.Interface
EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
}
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -95,9 +74,7 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
},
}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{
Tracing: componentconfig.TracingConfiguration{},
}
cfg := componentconfig.DeschedulerConfiguration{}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
@@ -106,48 +83,14 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
fs.Float32Var(&rs.ClientConnection.QPS, "client-connection-qps", rs.ClientConnection.QPS, "QPS to use for interacting with kubernetes apiserver.")
fs.Int32Var(&rs.ClientConnection.Burst, "client-connection-burst", rs.ClientConnection.Burst, "Burst to use for interacting with kubernetes apiserver.")
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
"Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
rs.SecureServing.AddFlags(fs)
}
func (rs *DeschedulerServer) Apply() error {
err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
if err != nil {
return err
}
rs.DefaultFeatureGates = features.DefaultMutableFeatureGate
// loopbackClientConfig is a config for a privileged loopback connection
var loopbackClientConfig *restclient.Config
var secureServing *apiserver.SecureServingInfo
if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return err
}
if secureServing != nil {
secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing
}
return nil
}


@@ -20,23 +20,22 @@ package app
import (
"context"
"io"
"os"
"os/signal"
"syscall"
"time"
"github.com/spf13/cobra"
"k8s.io/apiserver/pkg/server/healthz"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/tracing"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/server/healthz"
"github.com/spf13/cobra"
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs"
logsapi "k8s.io/component-base/logs/api/v1"
restclient "k8s.io/client-go/rest"
registry "k8s.io/component-base/logs/api/v1"
jsonLog "k8s.io/component-base/logs/json"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
@@ -49,91 +48,73 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
klog.ErrorS(err, "unable to initialize server")
}
featureGate := featuregate.NewFeatureGate()
logConfig := logsapi.NewLoggingConfiguration()
cmd := &cobra.Command{
Use: "descheduler",
Short: "descheduler",
Long: "The descheduler evicts pods which may be bound to less desired nodes",
PreRunE: func(cmd *cobra.Command, args []string) error {
logs.InitLogs()
if err := logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
return err
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
// s.Logs.Config.Format = s.Logging.Format
// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return
}
descheduler.SetupPlugins()
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if err = s.Apply(); err != nil {
klog.ErrorS(err, "failed to apply")
return err
var factory registry.LogFormatFactory
if s.Logging.Format == "json" {
factory = jsonLog.Factory{}
}
if err = Run(cmd.Context(), s); err != nil {
klog.ErrorS(err, "failed to run descheduler server")
return err
if factory == nil {
klog.ClearLogger()
} else {
log, logrFlush := factory.Create(registry.LoggingConfiguration{
Format: s.Logging.Format,
Verbosity: s.Logging.Verbosity,
})
defer logrFlush()
klog.SetLogger(log)
}
return nil
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
}
err = Run(ctx, s)
if err != nil {
klog.ErrorS(err, "descheduler server")
}
done()
// wait for metrics server to close
<-stoppedCh
},
}
cmd.SetOut(out)
flags := cmd.Flags()
s.AddFlags(flags)
runtime.Must(logsapi.AddFeatureGates(featureGate))
logsapi.AddFlags(logConfig, flags)
return cmd
}
func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !rs.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
var stoppedCh <-chan struct{}
var err error
if rs.SecureServingInfo != nil {
stoppedCh, _, err = rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
}
}
err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
klog.ErrorS(err, "failed to create tracer provider")
}
defer func() {
// we give the tracing.Shutdown() its own context as the
// original context may have been cancelled already. we
// have arbitrarily chosen the timeout duration.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
tracing.Shutdown(ctx)
}()
// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
watch.DefaultChanSize = 100000
err = descheduler.Run(ctx, rs)
if err != nil {
return err
}
done()
if stoppedCh != nil {
// wait for metrics server to close
<-stoppedCh
}
return nil
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return descheduler.Run(ctx, rs)
}
func SetupLogs() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}


@@ -21,8 +21,14 @@ import (
"k8s.io/component-base/cli"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
"sigs.k8s.io/descheduler/pkg/descheduler"
)
func init() {
app.SetupLogs()
descheduler.SetupPlugins()
}
func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)


@@ -1,70 +0,0 @@
## descheduler
descheduler
### Synopsis
The descheduler evicts pods which may be bound to less desired nodes
```
descheduler [flags]
```
### Options
```
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--client-connection-burst int32 Burst to use for interacting with kubernetes apiserver.
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
--client-connection-qps float32 QPS to use for interacting with kubernetes apiserver.
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-http2-serving If true, HTTP2 serving will be disabled [default=false]
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run Execute descheduler in dry run mode.
--enable-http2 If http/2 should be enabled for the metrics and health check
--feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
EvictionsInBackground=true|false (ALPHA - default=false)
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--log-json-info-buffer-size quantity [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--log-text-info-buffer-size quantity [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-text-split-stream [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
--otel-sample-rate float Sample rate to collect the Traces (default 1)
--otel-service-name string OTEL Trace name to be used with the resources (default "descheduler")
--otel-trace-namespace string OTEL Trace namespace to be used with the resources
--otel-transport-ca-cert string Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule pattern=N,... comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
```
### SEE ALSO
* [descheduler version](descheduler_version.md) - Version of descheduler


@@ -1,22 +0,0 @@
## descheduler version
Version of descheduler
### Synopsis
Prints the version of descheduler.
```
descheduler version [flags]
```
### Options
```
-h, --help help for version
```
### SEE ALSO
* [descheduler](descheduler.md) - descheduler


@@ -3,7 +3,7 @@
## Required Tools
- [Git](https://git-scm.com/downloads)
- [Go 1.23+](https://golang.org/dl/)
- [Go 1.16+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
@@ -20,7 +20,7 @@ make
Run descheduler.
```sh
./_output/bin/descheduler --client-connection-kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
```
View all CLI options.


@@ -26,7 +26,7 @@ When the above pre-release steps are complete and the release is ready to be cut
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
4. Cut release branch from `master`, eg `release-1.24`
5. Publish release using Github's release process from the git tag you created
6. Email `sig-scheduling@kubernetes.io` to announce the release
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
**Patch release**
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
@@ -34,7 +34,7 @@ When the above pre-release steps are complete and the release is ready to be cut
3. Merge Helm chart version update to release branch
4. Perform the image promotion process for the patch version
5. Publish release using Github's release process from the git tag you created
6. Email `sig-scheduling@kubernetes.io` to announce the release
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
### Flowchart


@@ -4,13 +4,19 @@ Starting with descheduler release v0.10.0 container images are available in the
Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.34.0 | registry.k8s.io/descheduler/descheduler:v0.34.0 | AMD64<br>ARM64<br>ARMv7 |
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore,
starting with descheduler release v0.20.0, use the below process to download the official descheduler
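A minimal sketch of that process, assuming the v0.20.0 image tag and a kind cluster created with the default name `kind`:
```sh
# Pull the multi-arch image locally first, then side-load it into the kind
# cluster, since kind cannot pull multi-arch manifests from a registry itself.
docker pull registry.k8s.io/descheduler/descheduler:v0.20.0
kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0 --name kind
```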
@@ -25,7 +31,65 @@ kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
## CLI Options
The descheduler has many CLI options that can be used to override its default behavior. Please check the [CLI Options](./cli/descheduler.md) documentation for details
The descheduler has many CLI options that can be used to override its default behavior.
```
descheduler --help
The descheduler evicts pods which may be bound to less desired nodes
Usage:
descheduler [flags]
descheduler [command]
Available Commands:
completion generate the autocompletion script for the specified shell
help Help about any command
version Version of descheduler
Flags:
--add-dir-header If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--alsologtostderr log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run execute descheduler in dry run mode.
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration.
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
--logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--skip-headers If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--skip-log-headers If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--stderrthreshold severity logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
Use "descheduler [command] --help" for more information about a command.
```
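As a quick illustration, here is a sketch of a typical invocation (the kubeconfig and policy paths are placeholders) that runs the descheduler every five minutes in dry-run mode:
```sh
# Loop every 5 minutes, log at verbosity 1, and report evictions without
# actually performing them.
descheduler --kubeconfig "$HOME/.kube/config" \
  --policy-config-file /path/to/policy.yaml \
  --descheduling-interval 5m \
  --dry-run \
  --v 1
```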
## Production Use Cases
This section contains descriptions of real world production use cases.
@@ -47,23 +111,19 @@ descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.y
This policy configuration file ensures that pods created more than 7 days ago are evicted.
```
---
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "PodLifeTime"
args:
maxPodLifeTimeSeconds: 604800
plugins:
deschedule:
enabled:
- "PodLifeTime"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```
### Balance Cluster By Node Memory Utilization
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
or `number of pods`.
#### Balance high utilization nodes
@@ -71,21 +131,17 @@ Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memo
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
```
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "LowNodeUtilization"
args:
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
targetThresholds:
"memory": 70
plugins:
balance:
enabled:
- "LowNodeUtilization"
```
#### Balance low utilization nodes
@@ -94,19 +150,15 @@ from nodes with memory utilization lower than 20%. This should be use `NodeResou
The evicted pods will be compacted into minimal set of nodes.
```
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "HighNodeUtilization"
args:
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
plugins:
balance:
enabled:
- "HighNodeUtilization"
```
### Autoheal Node Problems


@@ -1,18 +1,14 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemoveFailedPods"
args:
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "OutOfcpu"
- "CreateContainerConfigError"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
plugins:
deschedule:
enabled:
- "RemoveFailedPods"
minPodLifetimeSeconds: 3600 # 1 hour


@@ -1,13 +1,9 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "HighNodeUtilization"
args:
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
plugins:
balance:
enabled:
- "HighNodeUtilization"


@@ -1,15 +1,11 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "LowNodeUtilization"
args:
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
targetThresholds:
"memory": 70
plugins:
balance:
enabled:
- "LowNodeUtilization"


@@ -1,13 +1,8 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsViolatingNodeAffinity"
args:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"
plugins:
deschedule:
enabled:
- "RemovePodsViolatingNodeAffinity"
strategies:
"RemovePodsViolatingNodeAffinity":
enabled: true
params:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"


@@ -1,15 +1,11 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "PodLifeTime"
args:
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 604800 # 7 days
states:
- "Pending"
- "PodInitializing"
plugins:
deschedule:
enabled:
- "PodLifeTime"


@@ -1,36 +1,29 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemoveDuplicates"
- name: "RemovePodsViolatingInterPodAntiAffinity"
- name: "LowNodeUtilization"
args:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
- name: "RemovePodsHavingTooManyRestarts"
args:
podRestartThreshold: 100
includingInitContainers: true
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
deschedule:
enabled:
- "RemovePodsViolatingInterPodAntiAffinity"
- "RemovePodsHavingTooManyRestarts"
balance:
enabled:
- "RemoveDuplicates"
- "LowNodeUtilization"
- "RemovePodsViolatingTopologySpreadConstraint"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
"RemovePodsHavingTooManyRestarts":
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true


@@ -1,15 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsHavingTooManyRestarts"
args:
podRestartThreshold: 100
includingInitContainers: true
states:
- CrashLoopBackOff
plugins:
deschedule:
enabled:
- "RemovePodsHavingTooManyRestarts"


@@ -1,14 +1,8 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
balance:
enabled:
- "RemovePodsViolatingTopologySpreadConstraint"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
nodeFit: true
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints

go.mod

@@ -1,138 +1,110 @@
module sigs.k8s.io/descheduler
go 1.24.0
toolchain go1.24.3
godebug default=go1.24
go 1.19
require (
github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.7.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.64.0
github.com/spf13/cobra v1.10.0
github.com/spf13/pflag v1.0.9
go.opentelemetry.io/otel v1.36.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
go.opentelemetry.io/otel/sdk v1.36.0
go.opentelemetry.io/otel/trace v1.36.0
google.golang.org/grpc v1.72.2
k8s.io/api v0.34.0
k8s.io/apimachinery v0.34.0
k8s.io/apiserver v0.34.0
k8s.io/client-go v0.34.0
k8s.io/code-generator v0.34.0
k8s.io/component-base v0.34.0
k8s.io/component-helpers v0.34.0
k8s.io/klog/v2 v2.130.1
k8s.io/metrics v0.34.0
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
kubevirt.io/api v1.3.0
kubevirt.io/client-go v1.3.0
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
sigs.k8s.io/mdtoc v1.1.0
sigs.k8s.io/yaml v1.6.0
github.com/google/go-cmp v0.5.9
github.com/spf13/cobra v1.6.0
github.com/spf13/pflag v1.0.5
k8s.io/api v0.26.0
k8s.io/apimachinery v0.26.0
k8s.io/apiserver v0.26.0
k8s.io/client-go v0.26.0
k8s.io/code-generator v0.26.0
k8s.io/component-base v0.26.0
k8s.io/component-helpers v0.26.0
k8s.io/klog/v2 v2.80.1
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
sigs.k8s.io/mdtoc v1.0.1
)
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
require (
cel.dev/expr v0.24.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-kit/kit v0.13.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.2.4 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/cel-go v0.26.0 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/cel-go v0.12.5 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.25.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.26.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/v3 v3.5.5 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
go.opentelemetry.io/otel v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
go.opentelemetry.io/otel/trace v1.10.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.30.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
k8s.io/kms v0.34.0 // indirect
k8s.io/kube-openapi v0.30.0 // indirect
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/kms v0.26.0 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b

go.sum
File diff suppressed because it is too large.

@@ -1,257 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
# meta.) Assumes you care about pulls from remote "upstream" and
# checks them out to a branch named:
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
set -o errexit
set -o nounset
set -o pipefail
REPO_ROOT="$(git rev-parse --show-toplevel)"
declare -r REPO_ROOT
cd "${REPO_ROOT}"
STARTINGBRANCH=$(git symbolic-ref --short HEAD)
declare -r STARTINGBRANCH
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
DRY_RUN=${DRY_RUN:-""}
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
FORK_REMOTE=${FORK_REMOTE:-origin}
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}
if [[ -z ${GITHUB_USER:-} ]]; then
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
exit 1
fi
if ! command -v gh > /dev/null; then
echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
exit 1
fi
if [[ "$#" -lt 2 ]]; then
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
echo
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
echo " Examples:"
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
echo
echo " Set the DRY_RUN environment var to skip git push and creating PR."
echo " This is useful for creating patches to a release branch without making a PR."
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
echo
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
echo " This is useful when picking commits containing changes to API documentation."
echo
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
echo " to override the default remote names to what you have locally."
echo
echo " For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
exit 2
fi
# Checks if you are logged in. Will error/bail if you are not.
gh auth status
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
echo "!!! Dirty tree. Clean up and try again."
exit 1
fi
if [[ -e "${REBASEMAGIC}" ]]; then
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
exit 1
fi
declare -r BRANCH="$1"
shift 1
declare -r PULLS=( "$@" )
function join { local IFS="$1"; shift; echo "$*"; }
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
declare -r PULLDASH
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
declare -r PULLSUBJ
echo "+++ Updating remotes..."
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
exit 1
fi
NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
declare -r NEWBRANCHREQ
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
declare -r NEWBRANCH
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
declare -r NEWBRANCHUNIQ
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
cleanbranch=""
gitamcleanup=false
function return_to_kansas {
if [[ "${gitamcleanup}" == "true" ]]; then
echo
echo "+++ Aborting in-progress git am."
git am --abort >/dev/null 2>&1 || true
fi
# return to the starting branch and delete the PR text file
if [[ -z "${DRY_RUN}" ]]; then
echo
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
if [[ -n "${cleanbranch}" ]]; then
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
fi
fi
}
trap return_to_kansas EXIT
SUBJECTS=()
function make-a-pr() {
local rel
rel="$(basename "${BRANCH}")"
echo
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
local numandtitle
numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
prtext=$(cat <<EOF
Cherry pick of ${PULLSUBJ} on ${rel}.
${numandtitle}
For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.
\`\`\`release-note
\`\`\`
EOF
)
gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
}
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
cleanbranch="${NEWBRANCHUNIQ}"
gitamcleanup=true
for pull in "${PULLS[@]}"; do
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
echo
echo "+++ About to attempt cherry pick of PR. To reattempt:"
echo " $ git am -3 /tmp/${pull}.patch"
echo
git am -3 "/tmp/${pull}.patch" || {
conflicts=false
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|| [[ -e "${REBASEMAGIC}" ]]; do
conflicts=true # <-- We should have detected conflicts once
echo
echo "+++ Conflicts detected:"
echo
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
echo
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
echo
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
echo "Aborting." >&2
exit 1
fi
done
if [[ "${conflicts}" != "true" ]]; then
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
exit 1
fi
}
# set the subject
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
SUBJECTS+=("#${pull}: ${subject}")
# remove the patch file from /tmp
rm -f "/tmp/${pull}.patch"
done
gitamcleanup=false
# Re-generate docs (if needed)
if [[ -n "${REGENERATE_DOCS}" ]]; then
echo
echo "Regenerating docs..."
if ! hack/generate-docs.sh; then
echo
echo "hack/generate-docs.sh FAILED to complete."
exit 1
fi
fi
if [[ -n "${DRY_RUN}" ]]; then
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
echo "To return to the branch you were in when you invoked this script:"
echo
echo " git checkout ${STARTINGBRANCH}"
echo
echo "To delete this branch:"
echo
echo " git branch -D ${NEWBRANCHUNIQ}"
exit 0
fi
if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
echo "This isn't normal. Leaving you with push instructions:"
echo
echo "+++ First manually push the branch this script created:"
echo
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
echo
make-a-pr
cleanbranch=""
exit 0
fi
echo
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
echo
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
echo "Aborting." >&2
exit 1
fi
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
make-a-pr
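For instance, a hypothetical run (assuming the script is saved as `hack/cherry_pick_pull.sh`; the branch and PR number are placeholders):
```sh
# Cherry-pick PR 12345 onto upstream/release-1.26 and propose it as a new PR.
GITHUB_USER=<your-github-user> hack/cherry_pick_pull.sh upstream/release-1.26 12345
```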


@@ -1,20 +0,0 @@
package main
import (
"log"
"os"
"github.com/spf13/cobra/doc"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
var docGenPath = "docs/cli"
func main() {
cmd := app.NewDeschedulerCommand(os.Stdout)
cmd.AddCommand(app.NewVersionCommand())
cmd.DisableAutoGenTag = true // Disable this so that the diff wont track it
if err := doc.GenMarkdownTree(cmd, docGenPath); err != nil {
log.Fatal(err)
}
}


@@ -20,6 +20,6 @@ function find_dirs_containing_comment_tags() {
| LC_ALL=C sort -u \
)
IFS=" ";
IFS=",";
printf '%s' "${array[*]}";
}


@@ -1,26 +0,0 @@
#!/bin/bash
# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# go::verify_version verifies the go version is supported by the project.
# descheduler actively supports 3 versions, therefore 3 go versions are supported.
go::verify_version() {
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi
}


@@ -1,23 +0,0 @@
#!/bin/bash
# Copyright 2023 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
go run ${SCRIPT_DIR}/doc-gen


@@ -6,5 +6,5 @@ go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/con
${OS_OUTPUT_BINPATH}/conversion-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--output-file zz_generated.conversion.go \
$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
--output-file-base zz_generated.conversion


@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--output-file zz_generated.deepcopy.go \
$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
--output-file-base zz_generated.deepcopy


@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defa
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha2" \
--output-file zz_generated.defaults.go \
$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults


@@ -20,9 +20,13 @@ set -o nounset
set -o pipefail
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
go::verify_version
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi
cd "${DESCHEDULER_ROOT}"

hack/update-toc.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md


@@ -1,35 +0,0 @@
#!/bin/bash
# Copyright 2023 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
temp_dir=$(mktemp -d)
go run -ldflags "-X main.docGenPath=${temp_dir}" ${SCRIPT_DIR}/doc-gen
if ! _out="$(diff -Naupr ${SCRIPT_DIR}/../docs/cli "${temp_dir}")"; then
echo "Generated output differs:" >&2
echo "${_out}" >&2
echo "Generated conversions verify failed. Please run ./hack/update-docs.sh"
rm -rf ${temp_dir}
exit 1
fi
rm -rf ${temp_dir}


@@ -20,9 +20,13 @@ set -o nounset
set -o pipefail
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
go::verify_version
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi
cd "${DESCHEDULER_ROOT}"

hack/verify-toc.sh Executable file

@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
then
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
exit 1
fi


@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
ret=1
fi
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
echo "Your vendored results are different:" >&2
echo "${_out}" >&2
echo "Vendor Verify failed." >&2

File diff suppressed because it is too large.

@@ -1,16 +0,0 @@
title: descheduler integration with evacuation API as an alternative to eviction API
kep-number: 1397
authors:
- "@ingvagabund"
owning-sig: sig-scheduling
participating-sigs:
- sig-apps
status: provisional
creation-date: 2024-04-14
reviewers:
- atiratree
approvers:
- TBD
feature-gates:
- TBD
stage: alpha

File diff suppressed because it is too large.
Binary file not shown (deleted image, 48 KiB).

@@ -1,29 +0,0 @@
title: Descheduling framework
kep-number: 753
authors:
- "@ingvagabund"
- "@damemi"
owning-sig: sig-scheduling
status: provisional
creation-date: 2024-04-08
reviewers:
- "@ingvagabund"
- "@damemi"
- "@a7i"
- "@knelasevero"
approvers:
- "@ingvagabund"
- "@damemi"
- "@a7i"
- "@knelasevero"
#replaces:
# The target maturity stage in the current dev cycle for this KEP.
stage: alpha
# The most recent milestone for which work toward delivery of this KEP has been
# done. This can be the current (upcoming) milestone, if it is being actively
# worked on.
# latest-milestone: "v1.19"
# disable-supported: true


@@ -6,29 +6,23 @@ metadata:
namespace: kube-system
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
- name: "RemovePodsViolatingInterPodAntiAffinity"
- name: "RemoveDuplicates"
- name: "LowNodeUtilization"
args:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
plugins:
balance:
enabled:
- "LowNodeUtilization"
- "RemoveDuplicates"
deschedule:
enabled:
- "RemovePodsViolatingInterPodAntiAffinity"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50


@@ -22,31 +22,13 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["descheduler"]
verbs: ["get", "patch", "delete"]
- apiGroups: ["metrics.k8s.io"]
resources: ["nodes", "pods"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "watch", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
@@ -66,16 +48,3 @@ subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: descheduler-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: descheduler-role
subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system


@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.34.0
image: registry.k8s.io/descheduler/descheduler:v0.26.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.34.0
image: registry.k8s.io/descheduler/descheduler:v0.26.1
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"


@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.34.0
image: registry.k8s.io/descheduler/descheduler:v0.26.1
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -31,20 +31,12 @@ const (
var (
PodsEvicted = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
}, []string{"result", "strategy", "profile", "namespace", "node"})
PodsEvictedTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted_total",
Help: "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "profile", "namespace", "node"})
}, []string{"result", "strategy", "namespace", "node"})
buildInfo = metrics.NewGauge(
&metrics.GaugeOpts{
@@ -56,50 +48,9 @@ var (
},
)
DeschedulerLoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})
LoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})
DeschedulerStrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_strategy_duration_seconds",
Help: "Time taken to complete Each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
DeprecatedVersion: "0.34.0",
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
}, []string{"strategy", "profile"})
StrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "strategy_duration_seconds",
Help: "Time taken to complete Each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
}, []string{"strategy", "profile"})
metricsList = []metrics.Registerable{
PodsEvicted,
PodsEvictedTotal,
buildInfo,
DeschedulerLoopDuration,
DeschedulerStrategyDuration,
LoopDuration,
StrategyDuration,
}
)
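
The hunk above keeps the deprecated pods_evicted series alongside the new pods_evicted_total during the deprecation window. A short sketch of how these counters get registered and bumped, assuming the import path sigs.k8s.io/descheduler/metrics, that nothing has registered them yet, and using the master-side (five-label, profile-aware) variants from the hunk; roughly what the descheduler does at startup:

package main

import (
    "k8s.io/component-base/metrics/legacyregistry"

    "sigs.k8s.io/descheduler/metrics"
)

func main() {
    // Register both series: component-base marks pods_evicted as deprecated
    // (DeprecatedVersion 0.34.0 above) while pods_evicted_total is the
    // replacement that dashboards should migrate to.
    legacyregistry.MustRegister(metrics.PodsEvicted, metrics.PodsEvictedTotal)

    labels := map[string]string{
        "result":    "success",
        "strategy":  "LowNodeUtilization",
        "profile":   "default",
        "namespace": "default",
        "node":      "node-1",
    }
    // Bump old and new together during the deprecation window.
    metrics.PodsEvicted.With(labels).Inc()
    metrics.PodsEvictedTotal.With(labels).Inc()
}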


@@ -1,17 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api


@@ -15,7 +15,7 @@ package api
import "sort"
func SortDeschedulerProfileByName(profiles []DeschedulerProfile) []DeschedulerProfile {
func SortProfilesByName(profiles []Profile) []Profile {
sort.Slice(profiles, func(i, j int) bool {
return profiles[i].Name < profiles[j].Name
})


@@ -18,7 +18,6 @@ package api
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -29,7 +28,7 @@ type DeschedulerPolicy struct {
metav1.TypeMeta
// Profiles
Profiles []DeschedulerProfile
Profiles []Profile
// NodeSelector for a set of nodes to operate over
NodeSelector *string
@@ -39,39 +38,13 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint
// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
MaxNoOfPodsToEvictTotal *uint
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
// Default is false.
EvictionFailureEventNotification *bool
// MetricsCollector configures collection of metrics about actual resource utilization
// Deprecated. Use MetricsProviders field instead.
MetricsCollector *MetricsCollector
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
MetricsProviders []MetricsProvider
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
GracePeriodSeconds *int64
}
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable
type Namespaces struct {
Include []string `json:"include,omitempty"`
Exclude []string `json:"exclude,omitempty"`
}
// EvictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
type EvictionLimits struct {
// node restricts the maximum number of evictions per node
Node *uint `json:"node,omitempty"`
Include []string
Exclude []string
}
type (
@@ -80,11 +53,11 @@ type (
)
type PriorityThreshold struct {
Value *int32 `json:"value"`
Name string `json:"name"`
Value *int32
Name string
}
type DeschedulerProfile struct {
type Profile struct {
Name string
PluginConfigs []PluginConfig
Plugins Plugins
@@ -100,6 +73,7 @@ type Plugins struct {
Sort PluginSet
Deschedule PluginSet
Balance PluginSet
Evict PluginSet
Filter PluginSet
PreEvictionFilter PluginSet
}
@@ -108,55 +82,3 @@ type PluginSet struct {
Enabled []string
Disabled []string
}
type MetricsSource string
const (
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
KubernetesMetrics MetricsSource = "KubernetesMetrics"
// PrometheusMetrics enables metrics from a Prometheus metrics server.
PrometheusMetrics MetricsSource = "Prometheus"
)
// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
// Enabled enables metrics collection from the Kubernetes metrics server.
// Deprecated. Use MetricsProvider.Source field instead.
Enabled bool
}
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
// Source selects the metrics source, e.g. the Kubernetes metrics server or Prometheus.
Source MetricsSource
// Prometheus enables metrics collection through Prometheus
Prometheus *Prometheus
}
// ReferencedResourceList is an adaptation of v1.ResourceList with resources as references
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
type Prometheus struct {
URL string
// authToken used for authentication with the prometheus server.
// If not set the in cluster authentication token for the descheduler service
// account is read from the container's file system.
AuthToken *AuthToken
}
type AuthToken struct {
// secretReference references an authentication token.
// secrets are expected to be created under the descheduler's namespace.
SecretReference *SecretReference
}
// SecretReference holds a reference to a Secret
type SecretReference struct {
// namespace is the namespace of the secret.
Namespace string
// name is the name of the secret.
Name string
}
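
Since the internal types above carry no JSON tags, they are meant to be constructed in-process after conversion. A short illustrative build (the Prometheus URL and secret name are made up) wiring a profile to a Prometheus metrics provider:

package main

import (
    "fmt"

    api "sigs.k8s.io/descheduler/pkg/api"
)

func main() {
    evictTotal := uint(50)
    policy := api.DeschedulerPolicy{
        Profiles:                []api.DeschedulerProfile{{Name: "default"}},
        MaxNoOfPodsToEvictTotal: &evictTotal,
        MetricsProviders: []api.MetricsProvider{{
            Source: api.PrometheusMetrics,
            Prometheus: &api.Prometheus{
                URL: "http://prometheus.monitoring.svc:9090", // illustrative
                AuthToken: &api.AuthToken{
                    SecretReference: &api.SecretReference{
                        Namespace: "kube-system",
                        Name:      "descheduler-prometheus-token", // illustrative
                    },
                },
            },
        }},
    }
    fmt.Println(policy.Profiles[0].Name, *policy.MaxNoOfPodsToEvictTotal)
}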


@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import "k8s.io/apimachinery/pkg/runtime"
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}

pkg/api/v1alpha1/doc.go (new file, +23 lines)

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// Package v1alpha1 is the v1alpha1 version of the descheduler API
// +groupName=descheduler
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha1"


@@ -0,0 +1,63 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const (
GroupName = "descheduler"
GroupVersion = "v1alpha1"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&DeschedulerPolicy{},
)
return nil
}

pkg/api/v1alpha1/types.go (new file, +129 lines)

@@ -0,0 +1,129 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerPolicy struct {
metav1.TypeMeta `json:",inline"`
// Strategies
Strategies StrategyList `json:"strategies,omitempty"`
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type (
StrategyName string
StrategyList map[StrategyName]DeschedulerStrategy
)
type DeschedulerStrategy struct {
// Enabled or disabled
Enabled bool `json:"enabled,omitempty"`
// Weight
Weight int `json:"weight,omitempty"`
// Strategy parameters
Params *StrategyParameters `json:"params,omitempty"`
}
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable.
type Namespaces struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
}
// Besides Namespaces, ThresholdPriority, and ThresholdPriorityClassName, only one of its members may be specified
type StrategyParameters struct {
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
FailedPods *FailedPods `json:"failedPods,omitempty"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *Namespaces `json:"namespaces"`
ThresholdPriority *int32 `json:"thresholdPriority"`
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
NodeFit bool `json:"nodeFit"`
IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
ExcludedTaints []string `json:"excludedTaints,omitempty"`
}
type (
Percentage float64
ResourceThresholds map[v1.ResourceName]Percentage
)
type NodeResourceUtilizationThresholds struct {
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
NumberOfNodes int `json:"numberOfNodes,omitempty"`
}
type PodsHavingTooManyRestarts struct {
PodRestartThreshold int32 `json:"podRestartThreshold,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
type RemoveDuplicates struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
}
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
States []string `json:"states,omitempty"`
// Deprecated: Use States instead.
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
type FailedPods struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
Reasons []string `json:"reasons,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}
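
For comparison with the policy.yaml sample at the top of this compare, the same v1alpha1 LowNodeUtilization configuration expressed directly against these Go types (illustrative only):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"

    "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
    policy := v1alpha1.DeschedulerPolicy{
        Strategies: v1alpha1.StrategyList{
            "LowNodeUtilization": v1alpha1.DeschedulerStrategy{
                Enabled: true,
                Params: &v1alpha1.StrategyParameters{
                    NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
                        // Percentages mirror the sample policy above.
                        Thresholds: v1alpha1.ResourceThresholds{
                            v1.ResourceCPU:    20,
                            v1.ResourceMemory: 20,
                            v1.ResourcePods:   20,
                        },
                        TargetThresholds: v1alpha1.ResourceThresholds{
                            v1.ResourceCPU:    50,
                            v1.ResourceMemory: 50,
                            v1.ResourcePods:   50,
                        },
                    },
                },
            },
        },
    }
    fmt.Println(len(policy.Strategies))
}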


@@ -0,0 +1,380 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = new(StrategyParameters)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
if in == nil {
return nil
}
out := new(DeschedulerStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Exclude != nil {
in, out := &in.Exclude, &out.Exclude
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
func (in *Namespaces) DeepCopy() *Namespaces {
if in == nil {
return nil
}
out := new(Namespaces)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
if in == nil {
return nil
}
out := new(NodeResourceUtilizationThresholds)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = *in
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.States != nil {
in, out := &in.States, &out.States
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
if in == nil {
return nil
}
out := new(PodLifeTime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
if in == nil {
return nil
}
out := new(PodsHavingTooManyRestarts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
if in == nil {
return nil
}
out := new(RemoveDuplicates)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
{
in := &in
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
if in == nil {
return nil
}
out := new(ResourceThresholds)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
{
in := &in
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
if in == nil {
return nil
}
out := new(StrategyList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
if in.NodeResourceUtilizationThresholds != nil {
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
*out = new(NodeResourceUtilizationThresholds)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodsHavingTooManyRestarts != nil {
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
*out = new(PodsHavingTooManyRestarts)
**out = **in
}
if in.PodLifeTime != nil {
in, out := &in.PodLifeTime, &out.PodLifeTime
*out = new(PodLifeTime)
(*in).DeepCopyInto(*out)
}
if in.RemoveDuplicates != nil {
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ThresholdPriority != nil {
in, out := &in.ThresholdPriority, &out.ThresholdPriority
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.ExcludedTaints != nil {
in, out := &in.ExcludedTaints, &out.ExcludedTaints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
if in == nil {
return nil
}
out := new(StrategyParameters)
in.DeepCopyInto(out)
return out
}
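
A quick check of what the generated code above guarantees: pointer fields such as EvictLocalStoragePods (the subject of the fix in this compare) are re-allocated, so mutating a copy never leaks back into the original. Illustrative snippet:

package main

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
    evictLocal := true
    in := &v1alpha1.DeschedulerPolicy{EvictLocalStoragePods: &evictLocal}

    out := in.DeepCopy()
    *out.EvictLocalStoragePods = false // mutate only the copy

    // DeepCopyInto allocated a fresh bool for the copy, so the original
    // still reports true.
    fmt.Println(*in.EvictLocalStoragePods, *out.EvictLocalStoragePods) // true false
}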


@@ -0,0 +1,33 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}


@@ -1,136 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
)
var (
// pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
// used for defaulting/converting typed PluginConfig Args.
// Access via getPluginArgConversionScheme()
pluginArgConversionScheme *runtime.Scheme
initPluginArgConversionScheme sync.Once
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)
func GetPluginArgConversionScheme() *runtime.Scheme {
initPluginArgConversionScheme.Do(func() {
// set up the scheme used for plugin arg conversion
pluginArgConversionScheme = runtime.NewScheme()
utilruntime.Must(AddToScheme(pluginArgConversionScheme))
utilruntime.Must(api.AddToScheme(pluginArgConversionScheme))
})
return pluginArgConversionScheme
}
func Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
if err := autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s); err != nil {
return err
}
return convertToInternalPluginConfigArgs(out)
}
// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal
// types using a scheme, after applying defaults.
func convertToInternalPluginConfigArgs(out *api.DeschedulerPolicy) error {
scheme := GetPluginArgConversionScheme()
for i := range out.Profiles {
prof := &out.Profiles[i]
for j := range prof.PluginConfigs {
args := prof.PluginConfigs[j].Args
if args == nil {
continue
}
if _, isUnknown := args.(*runtime.Unknown); isUnknown {
continue
}
internalArgs, err := scheme.ConvertToVersion(args, api.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("converting .Profiles[%d].PluginConfigs[%d].Args into internal type: %w", i, j, err)
}
prof.PluginConfigs[j].Args = internalArgs
}
}
return nil
}
func Convert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if _, ok := pluginregistry.PluginRegistry[in.Name]; ok {
out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance.DeepCopyObject()
if in.Args.Raw != nil {
_, _, err := Codecs.UniversalDecoder().Decode(in.Args.Raw, nil, out.Args)
if err != nil {
return err
}
} else if in.Args.Object != nil {
out.Args = in.Args.Object
}
} else {
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
return err
}
}
return nil
}
func Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
if err := autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in, out, s); err != nil {
return err
}
return convertToExternalPluginConfigArgs(out)
}
// convertToExternalPluginConfigArgs converts PluginConfig#Args into
// external (versioned) types using a scheme.
func convertToExternalPluginConfigArgs(out *DeschedulerPolicy) error {
scheme := GetPluginArgConversionScheme()
for i := range out.Profiles {
for j := range out.Profiles[i].PluginConfigs {
args := out.Profiles[i].PluginConfigs[j].Args
if args.Object == nil {
continue
}
if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown {
continue
}
externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion)
if err != nil {
return err
}
out.Profiles[i].PluginConfigs[j].Args.Object = externalArgs
}
}
return nil
}
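
A sketch of exercising the exported converter above directly (test-style; the real loader goes through the codec factory instead). A nil conversion.Scope suffices on this path, and plugin args that remain runtime.Unknown are passed through untouched:

package main

import (
    "fmt"

    api "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
)

func main() {
    versioned := v1alpha2.DeschedulerPolicy{
        Profiles: []v1alpha2.DeschedulerProfile{{
            Name: "default",
            Plugins: v1alpha2.Plugins{
                Balance: v1alpha2.PluginSet{Enabled: []string{"LowNodeUtilization"}},
            },
        }},
    }

    var internal api.DeschedulerPolicy
    // The generated converters tolerate a nil scope here; with no
    // PluginConfigs set, the args-conversion loop is a no-op.
    if err := v1alpha2.Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(&versioned, &internal, nil); err != nil {
        panic(err)
    }
    fmt.Println(internal.Profiles[0].Plugins.Balance.Enabled)
}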


@@ -1,23 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import "k8s.io/apimachinery/pkg/runtime"
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}


@@ -1,24 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
// +k8s:defaulter-gen=TypeMeta
// Package v1alpha2 is the v1alpha2 version of the descheduler API
// +groupName=descheduler
package v1alpha2 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha2"


@@ -1,63 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name used in this package
const (
GroupName = "descheduler"
GroupVersion = "v1alpha2"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
// TODO this will get cleaned up when the scheme types are fixed
scheme.AddKnownTypes(SchemeGroupVersion,
&DeschedulerPolicy{},
)
return nil
}

View File

@@ -1,26 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import "sort"
func SortDeschedulerProfileByName(profiles []DeschedulerProfile) []DeschedulerProfile {
sort.Slice(profiles, func(i, j int) bool {
return profiles[i].Name < profiles[j].Name
})
return profiles
}

View File

@@ -1,134 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerPolicy struct {
metav1.TypeMeta `json:",inline"`
// Profiles
Profiles []DeschedulerProfile `json:"profiles,omitempty"`
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
// MaxNoOfPodsToEvictTotal restricts the maximum number of pods to be evicted in total.
MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
// Default is false.
EvictionFailureEventNotification *bool `json:"evictionFailureEventNotification,omitempty"`
// MetricsCollector configures collection of metrics about actual resource utilization
// Deprecated. Use MetricsProviders field instead.
MetricsCollector *MetricsCollector `json:"metricsCollector,omitempty"`
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
MetricsProviders []MetricsProvider `json:"metricsProviders,omitempty"`
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
}
type DeschedulerProfile struct {
Name string `json:"name"`
PluginConfigs []PluginConfig `json:"pluginConfig"`
Plugins Plugins `json:"plugins"`
}
type Plugins struct {
PreSort PluginSet `json:"presort"`
Sort PluginSet `json:"sort"`
Deschedule PluginSet `json:"deschedule"`
Balance PluginSet `json:"balance"`
Filter PluginSet `json:"filter"`
PreEvictionFilter PluginSet `json:"preevictionfilter"`
}
type PluginConfig struct {
Name string `json:"name"`
Args runtime.RawExtension `json:"args"`
}
type PluginSet struct {
Enabled []string `json:"enabled"`
Disabled []string `json:"disabled"`
}
type MetricsSource string
const (
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
KubernetesMetrics MetricsSource = "KubernetesMetrics"
// PrometheusMetrics enables metrics from a Prometheus metrics server.
PrometheusMetrics MetricsSource = "Prometheus"
)
// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
// Enabled enables metrics collection from the Kubernetes metrics server.
// Deprecated. Use MetricsProvider.Source field instead.
Enabled bool `json:"enabled,omitempty"`
}
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
// Source selects the metrics source, e.g. the Kubernetes metrics server or Prometheus.
Source MetricsSource `json:"source,omitempty"`
// Prometheus enables metrics collection through Prometheus
Prometheus *Prometheus `json:"prometheus,omitempty"`
}
type Prometheus struct {
URL string `json:"url,omitempty"`
// authToken used for authentication with the prometheus server.
// If not set the in cluster authentication token for the descheduler service
// account is read from the container's file system.
AuthToken *AuthToken `json:"authToken,omitempty"`
}
type AuthToken struct {
// secretReference references an authentication token.
// secrets are expected to be created under the descheduler's namespace.
SecretReference *SecretReference `json:"secretReference,omitempty"`
}
// SecretReference holds a reference to a Secret
type SecretReference struct {
// namespace is the namespace of the secret.
Namespace string `json:"namespace,omitempty"`
// name is the name of the secret.
Name string `json:"name,omitempty"`
}

View File

@@ -1,437 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha2
import (
unsafe "unsafe"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*AuthToken)(nil), (*api.AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_AuthToken_To_api_AuthToken(a.(*AuthToken), b.(*api.AuthToken), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.AuthToken)(nil), (*AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_AuthToken_To_v1alpha2_AuthToken(a.(*api.AuthToken), b.(*AuthToken), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.DeschedulerProfile)(nil), (*DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(a.(*api.DeschedulerProfile), b.(*DeschedulerProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*MetricsProvider)(nil), (*api.MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(a.(*MetricsProvider), b.(*api.MetricsProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.MetricsProvider)(nil), (*MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(a.(*api.MetricsProvider), b.(*MetricsProvider), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PluginSet)(nil), (*api.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PluginSet_To_api_PluginSet(a.(*PluginSet), b.(*api.PluginSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PluginSet)(nil), (*PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PluginSet_To_v1alpha2_PluginSet(a.(*api.PluginSet), b.(*PluginSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Plugins)(nil), (*api.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Plugins_To_api_Plugins(a.(*Plugins), b.(*api.Plugins), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.Plugins)(nil), (*Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Plugins_To_v1alpha2_Plugins(a.(*api.Plugins), b.(*Plugins), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Prometheus)(nil), (*api.Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Prometheus_To_api_Prometheus(a.(*Prometheus), b.(*api.Prometheus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.Prometheus)(nil), (*Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Prometheus_To_v1alpha2_Prometheus(a.(*api.Prometheus), b.(*Prometheus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*SecretReference)(nil), (*api.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_SecretReference_To_api_SecretReference(a.(*SecretReference), b.(*api.SecretReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.SecretReference)(nil), (*SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_SecretReference_To_v1alpha2_SecretReference(a.(*api.SecretReference), b.(*SecretReference), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*PluginConfig)(nil), (*api.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PluginConfig_To_api_PluginConfig(a.(*PluginConfig), b.(*api.PluginConfig), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
out.SecretReference = (*api.SecretReference)(unsafe.Pointer(in.SecretReference))
return nil
}
// Convert_v1alpha2_AuthToken_To_api_AuthToken is an autogenerated conversion function.
func Convert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
return autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in, out, s)
}
func autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
out.SecretReference = (*SecretReference)(unsafe.Pointer(in.SecretReference))
return nil
}
// Convert_api_AuthToken_To_v1alpha2_AuthToken is an autogenerated conversion function.
func Convert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
return autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in, out, s)
}
func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]api.DeschedulerProfile, len(*in))
for i := range *in {
if err := Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
out.MetricsCollector = (*api.MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
out.MetricsProviders = *(*[]api.MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
return nil
}
func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
if err := Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
out.MetricsCollector = (*MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
out.MetricsProviders = *(*[]MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
return nil
}
func autoConvert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in *DeschedulerProfile, out *api.DeschedulerProfile, s conversion.Scope) error {
out.Name = in.Name
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]api.PluginConfig, len(*in))
for i := range *in {
if err := Convert_v1alpha2_PluginConfig_To_api_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfigs = nil
}
if err := Convert_v1alpha2_Plugins_To_api_Plugins(&in.Plugins, &out.Plugins, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile is an autogenerated conversion function.
func Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in *DeschedulerProfile, out *api.DeschedulerProfile, s conversion.Scope) error {
return autoConvert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in, out, s)
}
func autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.DeschedulerProfile, out *DeschedulerProfile, s conversion.Scope) error {
out.Name = in.Name
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
if err := Convert_api_PluginConfig_To_v1alpha2_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfigs = nil
}
if err := Convert_api_Plugins_To_v1alpha2_Plugins(&in.Plugins, &out.Plugins, s); err != nil {
return err
}
return nil
}
// Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile is an autogenerated conversion function.
func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.DeschedulerProfile, out *DeschedulerProfile, s conversion.Scope) error {
return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
}
func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
}
func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
}
func autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
out.Source = api.MetricsSource(in.Source)
out.Prometheus = (*api.Prometheus)(unsafe.Pointer(in.Prometheus))
return nil
}
// Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider is an autogenerated conversion function.
func Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in, out, s)
}
func autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
out.Source = MetricsSource(in.Source)
out.Prometheus = (*Prometheus)(unsafe.Pointer(in.Prometheus))
return nil
}
// Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider is an autogenerated conversion function.
func Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
return autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in, out, s)
}
func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}
func autoConvert_api_PluginConfig_To_v1alpha2_PluginConfig(in *api.PluginConfig, out *PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}
// Convert_api_PluginConfig_To_v1alpha2_PluginConfig is an autogenerated conversion function.
func Convert_api_PluginConfig_To_v1alpha2_PluginConfig(in *api.PluginConfig, out *PluginConfig, s conversion.Scope) error {
return autoConvert_api_PluginConfig_To_v1alpha2_PluginConfig(in, out, s)
}
func autoConvert_v1alpha2_PluginSet_To_api_PluginSet(in *PluginSet, out *api.PluginSet, s conversion.Scope) error {
out.Enabled = *(*[]string)(unsafe.Pointer(&in.Enabled))
out.Disabled = *(*[]string)(unsafe.Pointer(&in.Disabled))
return nil
}
// Convert_v1alpha2_PluginSet_To_api_PluginSet is an autogenerated conversion function.
func Convert_v1alpha2_PluginSet_To_api_PluginSet(in *PluginSet, out *api.PluginSet, s conversion.Scope) error {
return autoConvert_v1alpha2_PluginSet_To_api_PluginSet(in, out, s)
}
func autoConvert_api_PluginSet_To_v1alpha2_PluginSet(in *api.PluginSet, out *PluginSet, s conversion.Scope) error {
out.Enabled = *(*[]string)(unsafe.Pointer(&in.Enabled))
out.Disabled = *(*[]string)(unsafe.Pointer(&in.Disabled))
return nil
}
// Convert_api_PluginSet_To_v1alpha2_PluginSet is an autogenerated conversion function.
func Convert_api_PluginSet_To_v1alpha2_PluginSet(in *api.PluginSet, out *PluginSet, s conversion.Scope) error {
return autoConvert_api_PluginSet_To_v1alpha2_PluginSet(in, out, s)
}
func autoConvert_v1alpha2_Plugins_To_api_Plugins(in *Plugins, out *api.Plugins, s conversion.Scope) error {
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.PreSort, &out.PreSort, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Sort, &out.Sort, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Deschedule, &out.Deschedule, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Balance, &out.Balance, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.PreEvictionFilter, &out.PreEvictionFilter, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_Plugins_To_api_Plugins is an autogenerated conversion function.
func Convert_v1alpha2_Plugins_To_api_Plugins(in *Plugins, out *api.Plugins, s conversion.Scope) error {
return autoConvert_v1alpha2_Plugins_To_api_Plugins(in, out, s)
}
func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.PreSort, &out.PreSort, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Sort, &out.Sort, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Deschedule, &out.Deschedule, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Balance, &out.Balance, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.PreEvictionFilter, &out.PreEvictionFilter, s); err != nil {
return err
}
return nil
}
// Convert_api_Plugins_To_v1alpha2_Plugins is an autogenerated conversion function.
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
}
func autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
out.URL = in.URL
out.AuthToken = (*api.AuthToken)(unsafe.Pointer(in.AuthToken))
return nil
}
// Convert_v1alpha2_Prometheus_To_api_Prometheus is an autogenerated conversion function.
func Convert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
return autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in, out, s)
}
func autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
out.URL = in.URL
out.AuthToken = (*AuthToken)(unsafe.Pointer(in.AuthToken))
return nil
}
// Convert_api_Prometheus_To_v1alpha2_Prometheus is an autogenerated conversion function.
func Convert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
return autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in, out, s)
}
func autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_v1alpha2_SecretReference_To_api_SecretReference is an autogenerated conversion function.
func Convert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
return autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in, out, s)
}
func autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}
// Convert_api_SecretReference_To_v1alpha2_SecretReference is an autogenerated conversion function.
func Convert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
return autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in, out, s)
}
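
The converters above cast pointer fields through unsafe.Pointer instead of copying; conversion-gen emits this only when the source and destination struct layouts are identical. A minimal sketch of the same trick with hypothetical types (not the real API structs):

package main

import (
	"fmt"
	"unsafe"
)

// Two hypothetical types with identical field order and types, the
// precondition for conversion-gen's zero-copy cast.
type internalSecretRef struct {
	Namespace string
	Name      string
}

type versionedSecretRef struct {
	Namespace string
	Name      string
}

func main() {
	in := &internalSecretRef{Namespace: "monitoring", Name: "prom-token"}
	// Reinterpret the pointer instead of allocating and copying.
	out := (*versionedSecretRef)(unsafe.Pointer(in))
	fmt.Println(out.Namespace, out.Name) // monitoring prom-token
}

When layouts differ, as with PluginConfig's Args above, the generated code falls back to field-by-field conversion through runtime helpers.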

View File

@@ -1,284 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
*out = *in
if in.SecretReference != nil {
in, out := &in.SecretReference, &out.SecretReference
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
if in == nil {
return nil
}
out := new(AuthToken)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
if in.MetricsCollector != nil {
in, out := &in.MetricsCollector, &out.MetricsCollector
*out = new(MetricsCollector)
**out = **in
}
if in.MetricsProviders != nil {
in, out := &in.MetricsProviders, &out.MetricsProviders
*out = make([]MetricsProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerProfile) DeepCopyInto(out *DeschedulerProfile) {
*out = *in
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerProfile.
func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
if in == nil {
return nil
}
out := new(DeschedulerProfile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
*out = *in
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(Prometheus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
if in == nil {
return nil
}
out := new(MetricsProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
*out = *in
in.Args.DeepCopyInto(&out.Args)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
func (in *PluginConfig) DeepCopy() *PluginConfig {
if in == nil {
return nil
}
out := new(PluginConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginSet) DeepCopyInto(out *PluginSet) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
func (in *PluginSet) DeepCopy() *PluginSet {
if in == nil {
return nil
}
out := new(PluginSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugins) DeepCopyInto(out *Plugins) {
*out = *in
in.PreSort.DeepCopyInto(&out.PreSort)
in.Sort.DeepCopyInto(&out.Sort)
in.Deschedule.DeepCopyInto(&out.Deschedule)
in.Balance.DeepCopyInto(&out.Balance)
in.Filter.DeepCopyInto(&out.Filter)
in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
func (in *Plugins) DeepCopy() *Plugins {
if in == nil {
return nil
}
out := new(Plugins)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
*out = *in
if in.AuthToken != nil {
in, out := &in.AuthToken, &out.AuthToken
*out = new(AuthToken)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
if in == nil {
return nil
}
out := new(Prometheus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}
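
The payoff of these generated helpers is aliasing safety: a caller can mutate a DeepCopy result without touching the original, e.g. an object served from an informer cache. A self-contained sketch of the same shape, using a toy type rather than the real policy:

package main

import "fmt"

// Toy stand-in for a generated DeepCopy on a struct with a pointer
// field; the generated code above follows the same shape.
type policy struct {
	NodeSelector *string
}

func (in *policy) DeepCopy() *policy {
	if in == nil {
		return nil
	}
	out := new(policy)
	*out = *in
	if in.NodeSelector != nil {
		s := *in.NodeSelector
		out.NodeSelector = &s
	}
	return out
}

func main() {
	sel := "kubernetes.io/os=linux"
	orig := &policy{NodeSelector: &sel}
	copied := orig.DeepCopy()
	*copied.NodeSelector = "kubernetes.io/os=windows"
	fmt.Println(*orig.NodeSelector) // still "kubernetes.io/os=linux"
}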

View File

@@ -1,33 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulters functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}
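
RegisterDefaults has an empty body here because v1alpha2 declares no SetDefaults_* functions. When such functions exist, defaulter-gen registers one callback per type; a hedged sketch of what that would look like (SetDefaults_DeschedulerPolicy is hypothetical):

// Sketch only: assumes a hand-written SetDefaults_DeschedulerPolicy
// exists in this package for defaulter-gen to discover.
func RegisterDefaults(scheme *runtime.Scheme) error {
	scheme.AddTypeDefaultingFunc(&DeschedulerPolicy{}, func(obj interface{}) {
		SetDefaults_DeschedulerPolicy(obj.(*DeschedulerPolicy))
	})
	return nil
}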

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -25,34 +25,13 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
*out = *in
if in.SecretReference != nil {
in, out := &in.SecretReference, &out.SecretReference
*out = new(SecretReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
if in == nil {
return nil
}
out := new(AuthToken)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
*out = make([]Profile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -72,33 +51,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
if in.MetricsCollector != nil {
in, out := &in.MetricsCollector, &out.MetricsCollector
*out = new(MetricsCollector)
**out = **in
}
if in.MetricsProviders != nil {
in, out := &in.MetricsProviders, &out.MetricsProviders
*out = make([]MetricsProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}
@@ -120,88 +72,6 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerProfile) DeepCopyInto(out *DeschedulerProfile) {
*out = *in
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerProfile.
func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
if in == nil {
return nil
}
out := new(DeschedulerProfile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EvictionLimits) DeepCopyInto(out *EvictionLimits) {
*out = *in
if in.Node != nil {
in, out := &in.Node, &out.Node
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvictionLimits.
func (in *EvictionLimits) DeepCopy() *EvictionLimits {
if in == nil {
return nil
}
out := new(EvictionLimits)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
*out = *in
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(Prometheus)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
if in == nil {
return nil
}
out := new(MetricsProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -280,6 +150,7 @@ func (in *Plugins) DeepCopyInto(out *Plugins) {
in.Sort.DeepCopyInto(&out.Sort)
in.Deschedule.DeepCopyInto(&out.Deschedule)
in.Balance.DeepCopyInto(&out.Balance)
in.Evict.DeepCopyInto(&out.Evict)
in.Filter.DeepCopyInto(&out.Filter)
in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
return
@@ -317,22 +188,25 @@ func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
func (in *Profile) DeepCopyInto(out *Profile) {
*out = *in
if in.AuthToken != nil {
in, out := &in.AuthToken, &out.AuthToken
*out = new(AuthToken)
(*in).DeepCopyInto(*out)
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile.
func (in *Profile) DeepCopy() *Profile {
if in == nil {
return nil
}
out := new(Prometheus)
out := new(Profile)
in.DeepCopyInto(out)
return out
}
@@ -358,19 +232,3 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -33,7 +34,6 @@ type DeschedulerConfiguration struct {
// KubeconfigFile is path to kubeconfig file with authorization and master
// location information.
// Deprecated: Use clientConnection.kubeConfig instead.
KubeconfigFile string
// PolicyConfigFile is the filepath to the descheduler policy configuration.
@@ -51,41 +51,13 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool
// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods bool
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool
// Tracing specifies the options for tracing.
Tracing TracingConfiguration
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration
// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration
}
type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, the NoopTraceProvider is used.
CollectorEndpoint string
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, the provider will start in insecure mode.
TransportCert string
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, the default namespace is used.
ServiceNamespace string
// SampleRate configures the sample rate of OTEL trace collection. A value >= 1.0 samples
// everything, a value < 0 samples nothing, and anything in between is treated as the
// fraction of traces to sample.
SampleRate float64
// FallbackToNoOpProviderOnError makes the trace provider fall back to the no-op provider
// when the configured endpoint-based provider cannot be set up.
FallbackToNoOpProviderOnError bool
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration
}
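
Taken together, the tracing fields compose as follows; the endpoint, cert path, and sample rate below are illustrative values, not defaults:

tracing := TracingConfiguration{
	CollectorEndpoint:             "otel-collector.observability.svc:4317",
	TransportCert:                 "/etc/ssl/otel/ca.crt", // omit to start in insecure mode
	ServiceName:                   "descheduler",
	ServiceNamespace:              "kube-system",
	SampleRate:                    0.25, // sample roughly a quarter of traces
	FallbackToNoOpProviderOnError: true,
}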

View File

@@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -33,7 +34,6 @@ type DeschedulerConfiguration struct {
// KubeconfigFile is path to kubeconfig file with authorization and master
// location information.
// Deprecated: Use clientConnection.kubeConfig instead.
KubeconfigFile string `json:"kubeconfigFile"`
// PolicyConfigFile is the filepath to the descheduler policy configuration.
@@ -51,41 +51,13 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`
// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
// Tracing is used to setup the required OTEL tracing configuration
Tracing TracingConfiguration `json:"tracing,omitempty"`
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
}
type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, the NoopTraceProvider is used.
CollectorEndpoint string `json:"collectorEndpoint"`
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, the provider will start in insecure mode.
TransportCert string `json:"transportCert,omitempty"`
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string `json:"serviceName,omitempty"`
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, the default namespace is used.
ServiceNamespace string `json:"serviceNamespace,omitempty"`
// SampleRate configures the sample rate of OTEL trace collection. A value >= 1.0 samples
// everything, a value < 0 samples nothing, and anything in between is treated as the
// fraction of traces to sample.
SampleRate float64 `json:"sampleRate"`
// FallbackToNoOpProviderOnError makes the trace provider fall back to the no-op provider
// when the configured endpoint-based provider cannot be set up.
FallbackToNoOpProviderOnError bool `json:"fallbackToNoOpProviderOnError"`
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration `json:"logging,omitempty"`
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,16 +46,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*componentconfig.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(a.(*TracingConfiguration), b.(*componentconfig.TracingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*componentconfig.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*componentconfig.TracingConfiguration), b.(*TracingConfiguration), scope)
}); err != nil {
return err
}
return nil
}
@@ -67,13 +57,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.EvictDaemonSetPods = in.EvictDaemonSetPods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.Logging = in.Logging
return nil
}
@@ -90,13 +76,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.EvictDaemonSetPods = in.EvictDaemonSetPods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.Logging = in.Logging
return nil
}
@@ -104,33 +86,3 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
}
func autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}
// Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in, out, s)
}
func autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}
// Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
func Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,9 +29,8 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
return
}
@@ -52,19 +51,3 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2026 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,9 +29,8 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
return
}
@@ -52,19 +51,3 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}

View File

@@ -17,78 +17,41 @@ limitations under the License.
package client
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"time"
promapi "github.com/prometheus/client_golang/api"
"github.com/prometheus/common/config"
clientset "k8s.io/client-go/kubernetes"
// Ensure to load all auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
componentbaseconfig "k8s.io/component-base/config"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)
var K8sPodCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
func CreateClient(kubeconfig, userAgt string) (clientset.Interface, error) {
var cfg *rest.Config
if len(clientConnection.Kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
if len(kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to parse kubeconfig file: %v ", err)
return nil, fmt.Errorf("Failed to parse kubeconfig file: %v ", err)
}
cfg, err = clientcmd.BuildConfigFromFlags(master, clientConnection.Kubeconfig)
cfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
return nil, fmt.Errorf("unable to build config: %v", err)
return nil, fmt.Errorf("Unable to build config: %v", err)
}
} else {
var err error
cfg, err = rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("unable to build in cluster config: %v", err)
return nil, fmt.Errorf("Unable to build in cluster config: %v", err)
}
}
cfg.Burst = int(clientConnection.Burst)
cfg.QPS = clientConnection.QPS
if len(userAgt) != 0 {
cfg = rest.AddUserAgent(cfg, userAgt)
return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
} else {
return clientset.NewForConfig(cfg)
}
return cfg, nil
}
func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
cfg, err := createConfig(clientConnection, userAgt)
if err != nil {
return nil, fmt.Errorf("unable to create config: %v", err)
}
return clientset.NewForConfig(cfg)
}
func CreateMetricsClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (metricsclient.Interface, error) {
cfg, err := createConfig(clientConnection, userAgt)
if err != nil {
return nil, fmt.Errorf("unable to create config: %v", err)
}
// Create the metrics clientset to access the metrics.k8s.io API
return metricsclient.NewForConfig(cfg)
}
func GetMasterFromKubeconfig(filename string) (string, error) {
@@ -99,69 +62,11 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
context, ok := config.Contexts[config.CurrentContext]
if !ok {
return "", fmt.Errorf("failed to get master address from kubeconfig: current context not found")
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
if val, ok := config.Clusters[context.Cluster]; ok {
return val.Server, nil
}
return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
}
func loadCAFile(filepath string) (*x509.CertPool, error) {
caCert, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}
caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
return nil, fmt.Errorf("failed to append CA certificate to the pool")
}
return caCertPool, nil
}
func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *http.Transport, error) {
// Retrieve Pod CA cert
caCertPool, err := loadCAFile(K8sPodCAFilePath)
if err != nil {
return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
}
// Get Prometheus Host
u, err := url.Parse(prometheusURL)
if err != nil {
return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
}
t := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: &tls.Config{
RootCAs: caCertPool,
ServerName: u.Host,
},
}
roundTripper := transport.NewBearerAuthRoundTripper(
authToken,
t,
)
if authToken != "" {
client, err := promapi.NewClient(promapi.Config{
Address: prometheusURL,
RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(authToken), roundTripper),
})
return client, t, err
}
client, err := promapi.NewClient(promapi.Config{
Address: prometheusURL,
})
return client, t, err
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
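
On the newer side of this diff, CreatePrometheusClient layers a bearer-token round tripper over a CA-pinned TLS transport before handing it to the Prometheus API client. A compressed sketch of that layering, using http.DefaultTransport and a hypothetical helper name for brevity:

package client

import (
	"net/http"

	promapi "github.com/prometheus/client_golang/api"
	"k8s.io/client-go/transport"
)

// newTokenPrometheusClient (hypothetical) shows the round-tripper
// layering: every outgoing request carries the bearer token.
func newTokenPrometheusClient(url, token string) (promapi.Client, error) {
	rt := transport.NewBearerAuthRoundTripper(token, http.DefaultTransport)
	return promapi.NewClient(promapi.Config{
		Address:      url,
		RoundTripper: rt,
	})
}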

View File

@@ -19,40 +19,21 @@ package descheduler
import (
"context"
"fmt"
"math"
"net/http"
"strconv"
"time"
promapi "github.com/prometheus/client_golang/api"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
policyv1 "k8s.io/api/policy/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/rest"
listersv1 "k8s.io/client-go/listers/core/v1"
schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events"
"k8s.io/client-go/util/workqueue"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
@@ -60,421 +41,25 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/tracing"
"sigs.k8s.io/descheduler/pkg/framework"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/pkg/version"
)
const (
prometheusAuthTokenSecretKey = "prometheusAuthToken"
workQueueKey = "key"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
type profileRunner struct {
name string
descheduleEPs, balanceEPs eprunner
}
type descheduler struct {
rs *options.DeschedulerServer
ir *informerResources
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
namespacedSecretsLister corev1listers.SecretNamespaceLister
deschedulerPolicy *api.DeschedulerPolicy
eventRecorder events.EventRecorder
podEvictor *evictions.PodEvictor
podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
metricsCollector *metricscollector.MetricsCollector
prometheusClient promapi.Client
previousPrometheusClientTransport *http.Transport
queue workqueue.RateLimitingInterface
currentPrometheusAuthToken string
metricsProviders map[api.MetricsSource]*api.MetricsProvider
}
type informerResources struct {
sharedInformerFactory informers.SharedInformerFactory
resourceToInformer map[schema.GroupVersionResource]informers.GenericInformer
}
func newInformerResources(sharedInformerFactory informers.SharedInformerFactory) *informerResources {
return &informerResources{
sharedInformerFactory: sharedInformerFactory,
resourceToInformer: make(map[schema.GroupVersionResource]informers.GenericInformer),
}
}
func (ir *informerResources) Uses(resources ...schema.GroupVersionResource) error {
for _, resource := range resources {
informer, err := ir.sharedInformerFactory.ForResource(resource)
if err != nil {
return err
}
ir.resourceToInformer[resource] = informer
}
return nil
}
// CopyTo copies informer subscriptions to the new factory and objects to the fake client so that the backing caches are populated for when listers are used.
func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFactory informers.SharedInformerFactory) error {
for resource, informer := range ir.resourceToInformer {
_, err := newFactory.ForResource(resource)
if err != nil {
return fmt.Errorf("error getting resource %s: %w", resource, err)
}
objects, err := informer.Lister().List(labels.Everything())
if err != nil {
return fmt.Errorf("error listing %s: %w", informer, err)
}
for _, object := range objects {
fakeClient.Tracker().Add(object)
}
}
return nil
}
func metricsProviderListToMap(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider {
providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
for _, provider := range providersList {
providersMap[provider.Source] = &provider
}
return providersMap
}
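
One subtlety in metricsProviderListToMap: &provider takes the address of the range variable, which is per-iteration only since Go 1.22; on older toolchains every map entry would alias the final element. A copy-first variant is safe on any Go version:

func metricsProviderListToMapSafe(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider {
	providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
	for i := range providersList {
		p := providersList[i] // fresh copy, independent of the range variable
		providersMap[p.Source] = &p
	}
	return providersMap
}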
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory, namespacedSharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
ir := newInformerResources(sharedInformerFactory)
ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
v1.SchemeGroupVersion.WithResource("nodes"),
// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
// consistent with the real runs without having to keep the list here in sync.
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"), // Used by the defaultevictor plugin
)
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}
podEvictor, err := evictions.NewPodEvictor(
ctx,
rs.Client,
eventRecorder,
podInformer,
rs.DefaultFeatureGates,
evictions.NewOptions().
WithPolicyGroupVersion(evictionPolicyGroupVersion).
WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
WithGracePeriodSeconds(deschedulerPolicy.GracePeriodSeconds).
WithDryRun(rs.DryRun).
WithMetricsEnabled(!rs.DisableMetrics),
)
if err != nil {
return nil, err
}
desch := &descheduler{
rs: rs,
ir: ir,
getPodsAssignedToNode: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
deschedulerPolicy: deschedulerPolicy,
eventRecorder: eventRecorder,
podEvictor: podEvictor,
podEvictionReactionFnc: podEvictionReactionFnc,
prometheusClient: rs.PrometheusClient,
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}),
metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
}
if rs.MetricsClient != nil {
nodeSelector := labels.Everything()
if deschedulerPolicy.NodeSelector != nil {
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
if err != nil {
return nil, err
}
nodeSelector = sel
}
desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
}
prometheusProvider := desch.metricsProviders[api.PrometheusMetrics]
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.AuthToken != nil {
authTokenSecret := prometheusProvider.Prometheus.AuthToken.SecretReference
if authTokenSecret == nil || authTokenSecret.Namespace == "" {
return nil, fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
}
if namespacedSharedInformerFactory == nil {
return nil, fmt.Errorf("namespacedSharedInformerFactory not configured")
}
namespacedSharedInformerFactory.Core().V1().Secrets().Informer().AddEventHandler(desch.eventHandler())
desch.namespacedSecretsLister = namespacedSharedInformerFactory.Core().V1().Secrets().Lister().Secrets(authTokenSecret.Namespace)
}
return desch, nil
}
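
The secret-based auth path in newDescheduler only activates for a policy whose Prometheus provider carries an AuthToken secret reference, roughly like this hedged literal (URL, namespace, and secret name are placeholders):

provider := api.MetricsProvider{
	Source: api.PrometheusMetrics,
	Prometheus: &api.Prometheus{
		URL: "https://prometheus-k8s.monitoring.svc:9091",
		AuthToken: &api.AuthToken{
			SecretReference: &api.SecretReference{
				Namespace: "kube-system",
				Name:      "descheduler-prom-token",
			},
		},
	},
}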
func (d *descheduler) reconcileInClusterSAToken() error {
// Read the SA token and assume it has sufficient permissions to authenticate
cfg, err := rest.InClusterConfig()
if err == nil {
if d.currentPrometheusAuthToken != cfg.BearerToken {
klog.V(2).Infof("Creating Prometheus client (with SA token)")
prometheusClient, transport, err := client.CreatePrometheusClient(d.metricsProviders[api.PrometheusMetrics].Prometheus.URL, cfg.BearerToken)
if err != nil {
return fmt.Errorf("unable to create a prometheus client: %v", err)
}
d.prometheusClient = prometheusClient
if d.previousPrometheusClientTransport != nil {
d.previousPrometheusClientTransport.CloseIdleConnections()
}
d.previousPrometheusClientTransport = transport
d.currentPrometheusAuthToken = cfg.BearerToken
}
return nil
}
if err == rest.ErrNotInCluster {
return nil
}
return fmt.Errorf("unexpected error when reading in cluster config: %v", err)
}
func (d *descheduler) runAuthenticationSecretReconciler(ctx context.Context) {
defer utilruntime.HandleCrash()
defer d.queue.ShutDown()
klog.Infof("Starting authentication secret reconciler")
defer klog.Infof("Shutting down authentication secret reconciler")
go wait.UntilWithContext(ctx, d.runAuthenticationSecretReconcilerWorker, time.Second)
<-ctx.Done()
}
func (d *descheduler) runAuthenticationSecretReconcilerWorker(ctx context.Context) {
for d.processNextWorkItem(ctx) {
}
}
func (d *descheduler) processNextWorkItem(ctx context.Context) bool {
dsKey, quit := d.queue.Get()
if quit {
return false
}
defer d.queue.Done(dsKey)
err := d.sync()
if err == nil {
d.queue.Forget(dsKey)
return true
}
utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
d.queue.AddRateLimited(dsKey)
return true
}
func (d *descheduler) sync() error {
prometheusConfig := d.metricsProviders[api.PrometheusMetrics].Prometheus
if prometheusConfig == nil || prometheusConfig.AuthToken == nil || prometheusConfig.AuthToken.SecretReference == nil {
return fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
}
ns := prometheusConfig.AuthToken.SecretReference.Namespace
name := prometheusConfig.AuthToken.SecretReference.Name
secretObj, err := d.namespacedSecretsLister.Get(name)
if err != nil {
// clear the token if the secret is not found
if apierrors.IsNotFound(err) {
d.currentPrometheusAuthToken = ""
if d.previousPrometheusClientTransport != nil {
d.previousPrometheusClientTransport.CloseIdleConnections()
}
d.previousPrometheusClientTransport = nil
d.prometheusClient = nil
}
return fmt.Errorf("unable to get %v/%v secret", ns, name)
}
authToken := string(secretObj.Data[prometheusAuthTokenSecretKey])
if authToken == "" {
return fmt.Errorf("prometheus authentication token secret missing %q data or empty", prometheusAuthTokenSecretKey)
}
if d.currentPrometheusAuthToken == authToken {
return nil
}
klog.V(2).Infof("authentication secret token updated, recreating prometheus client")
prometheusClient, transport, err := client.CreatePrometheusClient(prometheusConfig.URL, authToken)
if err != nil {
return fmt.Errorf("unable to create a prometheus client: %v", err)
}
d.prometheusClient = prometheusClient
if d.previousPrometheusClientTransport != nil {
d.previousPrometheusClientTransport.CloseIdleConnections()
}
d.previousPrometheusClientTransport = transport
d.currentPrometheusAuthToken = authToken
return nil
}
func (d *descheduler) eventHandler() cache.ResourceEventHandler {
return cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
UpdateFunc: func(old, new interface{}) { d.queue.Add(workQueueKey) },
DeleteFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
}
}
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
defer span.End()
defer func(loopStartDuration time.Time) {
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
metrics.LoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
}(time.Now())
// fewer than two nodes leaves nothing to rebalance, so skip this cycle
if len(nodes) <= 1 {
klog.InfoS("Skipping descheduling cycle: requires >=2 nodes", "found", len(nodes))
return nil // gracefully skip this cycle instead of aborting
}
var client clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client,
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if d.rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
err := d.ir.CopyTo(fakeClient, fakeSharedInformerFactory)
if err != nil {
return err
}
// create a new instance of the shared informer factory from the cached client and
// register the pod informer, otherwise it will never be started
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
client = fakeClient
d.sharedInformerFactory = fakeSharedInformerFactory
} else {
client = d.rs.Client
}
klog.V(3).Infof("Setting up the pod evictor")
d.podEvictor.SetClient(client)
d.podEvictor.ResetCounters()
d.runProfiles(ctx, client, nodes)
klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())
return nil
}
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()
var profileRunners []profileRunner
for idx, profile := range d.deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
ctx,
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
frameworkprofile.WithPodEvictor(d.podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
frameworkprofile.WithMetricsCollector(d.metricsCollector),
frameworkprofile.WithPrometheusClient(d.prometheusClient),
// Generate a unique instance ID using just the index to avoid long IDs
// when profile names are very long
frameworkprofile.WithProfileInstanceID(fmt.Sprintf("%d", idx)),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
continue
}
profileRunners = append(profileRunners, profileRunner{profile.Name, currProfile.RunDeschedulePlugins, currProfile.RunBalancePlugins})
}
for _, profileR := range profileRunners {
// First deschedule
status := profileR.descheduleEPs(ctx, nodes)
if status != nil && status.Err != nil {
span.AddEvent("failed to perform deschedule operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.DescheduleOperation)))
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profileR.name)
continue
}
}
for _, profileR := range profileRunners {
// Balance Later
status := profileR.balanceEPs(ctx, nodes)
if status != nil && status.Err != nil {
span.AddEvent("failed to perform balance operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.BalanceOperation)))
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profileR.name)
continue
}
}
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "Run")
defer span.End()
metrics.Register()
clientConnection := rs.ClientConnection
if rs.KubeconfigFile != "" && clientConnection.Kubeconfig == "" {
clientConnection.Kubeconfig = rs.KubeconfigFile
}
rsclient, eventClient, err := createClients(clientConnection)
rsclient, eventClient, err := createClients(rs.KubeconfigFile)
if err != nil {
return err
}
rs.Client = rsclient
rs.EventClient = eventClient
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginregistry.PluginRegistry)
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginbuilder.PluginRegistry)
if err != nil {
return err
}
@@ -482,40 +67,25 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return fmt.Errorf("deschedulerPolicy is nil")
}
// Add k8s compatibility warnings to logs
if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
klog.Warning(err.Error())
}
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
return err
}
if (deschedulerPolicy.MetricsCollector != nil && deschedulerPolicy.MetricsCollector.Enabled) || metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.KubernetesMetrics] != nil {
metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
if err != nil {
return err
}
rs.MetricsClient = metricsClient
}
runFn := func() error {
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
span.AddEvent("Validation Failure", trace.WithAttributes(attribute.String("err", "leaderElection must be used with deschedulingInterval")))
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
}
if rs.LeaderElection.LeaderElect && rs.DryRun {
klog.V(1).Info("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
}
if rs.LeaderElection.LeaderElect && !rs.DryRun {
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
span.AddEvent("Leader Election Failure", trace.WithAttributes(attribute.String("err", err.Error())))
return fmt.Errorf("leaderElection: %w", err)
}
return nil
@@ -524,38 +94,16 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return runFn()
}
func validateVersionCompatibility(discovery discovery.DiscoveryInterface, deschedulerVersionInfo version.Info) error {
kubeServerVersionInfo, err := discovery.ServerVersion()
if err != nil {
return fmt.Errorf("failed to discover Kubernetes server version: %v", err)
}
kubeServerVersion, err := utilversion.ParseSemantic(kubeServerVersionInfo.String())
if err != nil {
return fmt.Errorf("failed to parse Kubernetes server version '%s': %v", kubeServerVersionInfo.String(), err)
}
deschedulerMinor, err := strconv.ParseFloat(deschedulerVersionInfo.Minor, 64)
if err != nil {
return fmt.Errorf("failed to convert Descheduler minor version '%s' to float: %v", deschedulerVersionInfo.Minor, err)
}
kubeServerMinor := float64(kubeServerVersion.Minor())
if math.Abs(deschedulerMinor-kubeServerMinor) > 3 {
return fmt.Errorf(
"descheduler version %s.%s may not be supported on your version of Kubernetes %v."+
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
deschedulerVersionInfo.Major,
deschedulerVersionInfo.Minor,
kubeServerVersionInfo.String(),
)
}
return nil
}
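// Illustrative sketch (not part of the descheduler source): how the skew check
// above plays out for a hypothetical pairing. A descheduler reporting minor
// version "26" against a Kubernetes v1.30.0 server gives |26-30| > 3, so an
// error is returned; against v1.28.x the difference is within 3 and nil comes
// back. Run only logs the result as a warning, so startup continues either way:
//
// if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
//     klog.Warning(err.Error())
// }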
func cachedClient(
realClient clientset.Interface,
podLister listersv1.PodLister,
nodeLister listersv1.NodeLister,
namespaceLister listersv1.NamespaceLister,
priorityClassLister schedulingv1.PriorityClassLister,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
@@ -572,23 +120,132 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
}
// fallback to the default reactor
return false, nil, nil
})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
}
for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
}
}
nodes, err := nodeLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
namespaces, err := namespaceLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
priorityClasses, err := priorityClassLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
}
}
return fakeClient, nil
}
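// Illustrative sketch (not part of the descheduler source): how the dry-run
// path below is expected to use cachedClient. The listers are assumed to come
// from an already-synced shared informer factory backed by the real client.
//
// fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
// if err != nil {
//     return err
// }
// // Evictions issued against fakeClient only delete pods from the fake
// // object tracker, so strategies running back to back observe each
// // other's evictions without touching the real cluster.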
type tokenReconciliation int
// evictorImpl implements the Evictor interface so plugins
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
podEvictor *evictions.PodEvictor
evictorFilter framework.EvictorPlugin
}
const (
noReconciliation tokenReconciliation = iota
inClusterReconciliation
secretReconciliation
)
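// Illustrative sketch (not part of the descheduler source): the reconciliation
// mode selected from the policy's Prometheus provider settings gates which
// token-refresh path runs, roughly:
//
// switch metricProviderTokenReconciliation {
// case inClusterReconciliation:
//     // periodically re-read the mounted service account token
// case secretReconciliation:
//     // watch the referenced secret and rebuild the Prometheus client on change
// case noReconciliation:
//     // no authenticated Prometheus provider configured; nothing to refresh
// }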
var _ framework.Evictor = &evictorImpl{}
// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
return ei.evictorFilter.Filter(pod)
}
// PreEvictionFilter checks if pod can be evicted right before eviction
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
return ei.evictorFilter.PreEvictionFilter(pod)
}
// Evict evicts a pod (no pre-check performed)
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
return ei.podEvictor.EvictPod(ctx, pod, opts)
}
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
return ei.podEvictor.NodeLimitExceeded(node)
}
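// Illustrative sketch (not part of the descheduler source): the filter-then-evict
// flow a strategy plugin is expected to follow through the framework.Evictor
// interface. "candidate" is a hypothetical *v1.Pod picked by the strategy.
//
// func evictCandidate(ctx context.Context, evictor framework.Evictor, candidate *v1.Pod) bool {
//     if !evictor.Filter(candidate) || !evictor.PreEvictionFilter(candidate) {
//         return false // the evictor plugin's policy protects this pod
//     }
//     return evictor.Evict(ctx, candidate, evictions.EvictOptions{})
// }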
// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
clientSet clientset.Interface
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictor *evictorImpl
}
var _ framework.Handle = &handleImpl{}
// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
return hi.clientSet
}
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.getPodsAssignedToNodeFunc
}
// SharedInformerFactory retrieves shared informer factory
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.sharedInformerFactory
}
// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() framework.Evictor {
return hi.evictor
}
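// Illustrative sketch (not part of the descheduler source): a minimal
// DeschedulePlugin that relies only on the framework handle it is given.
// The plugin name and its selection logic are hypothetical.
//
// type idlePodsPlugin struct{ handle framework.Handle }
//
// func (p *idlePodsPlugin) Name() string { return "IdlePods" }
//
// func (p *idlePodsPlugin) Deschedule(ctx context.Context, nodes []*v1.Node) *framework.Status {
//     for _, node := range nodes {
//         pods, err := p.handle.GetPodsAssignedToNodeFunc()(node.Name, func(*v1.Pod) bool { return true })
//         if err != nil {
//             return &framework.Status{Err: err}
//         }
//         for _, pod := range pods {
//             if p.handle.Evictor().Filter(pod) && p.handle.Evictor().PreEvictionFilter(pod) {
//                 p.handle.Evictor().Evict(ctx, pod, evictions.EvictOptions{})
//             }
//         }
//     }
//     return nil
// }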
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
defer span.End()
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
@@ -601,88 +258,155 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
} else {
eventClient = rs.Client
}
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()
var namespacedSharedInformerFactory informers.SharedInformerFactory
metricProviderTokenReconciliation := noReconciliation
prometheusProvider := metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.PrometheusMetrics]
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.URL != "" {
if prometheusProvider.Prometheus.AuthToken != nil {
// The referenced secret will get reconciled into the Prometheus auth token
namespacedSharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields), informers.WithNamespace(prometheusProvider.Prometheus.AuthToken.SecretReference.Namespace))
metricProviderTokenReconciliation = secretReconciliation
} else {
// Use the SA token and assume it has sufficient permissions to authenticate
metricProviderTokenReconciliation = inClusterReconciliation
}
}
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory, namespacedSharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
sharedInformerFactory.Start(ctx.Done())
if metricProviderTokenReconciliation == secretReconciliation {
namespacedSharedInformerFactory.Start(ctx.Done())
}
sharedInformerFactory.WaitForCacheSync(ctx.Done())
descheduler.podEvictor.WaitForEventHandlersSync(ctx)
if metricProviderTokenReconciliation == secretReconciliation {
namespacedSharedInformerFactory.WaitForCacheSync(ctx.Done())
}
if descheduler.metricsCollector != nil {
go func() {
klog.V(2).Infof("Starting metrics collector")
descheduler.metricsCollector.Run(ctx)
klog.V(2).Infof("Stopped metrics collector")
}()
klog.V(2).Infof("Waiting for metrics collector to sync")
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
return descheduler.metricsCollector.HasSynced(), nil
}); err != nil {
return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
}
}
if metricProviderTokenReconciliation == secretReconciliation {
go descheduler.runAuthenticationSecretReconciler(ctx)
}
wait.NonSlidingUntil(func() {
if metricProviderTokenReconciliation == inClusterReconciliation {
// Read the SA token and assume it has sufficient permissions to authenticate
if err := descheduler.reconcileInClusterSAToken(); err != nil {
klog.ErrorS(err, "unable to reconcile an in cluster SA token")
cancel()
return
}
}
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
cancel()
return
}
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
cancel()
return
}
var podEvictorClient clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client,
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
if err != nil {
klog.Error(err)
return
}
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
podEvictorClient = fakeClient
} else {
podEvictorClient = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
podEvictorClient,
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!rs.DisableMetrics,
eventRecorder,
)
var enabledDeschedulePlugins []framework.DeschedulePlugin
var enabledBalancePlugins []framework.BalancePlugin
// Build plugins
for _, profile := range deschedulerPolicy.Profiles {
pc := getPluginConfig(defaultevictor.PluginName, profile.PluginConfigs)
if pc == nil {
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", defaultevictor.PluginName, "profile", profile.Name)
continue
}
evictorFilter, err := defaultevictor.New(
pc.Args,
&handleImpl{
clientSet: rs.Client,
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
},
)
if err != nil {
klog.ErrorS(fmt.Errorf("unable to construct a plugin"), "skipping plugin", "plugin", defaultevictor.PluginName)
continue
}
handle := &handleImpl{
clientSet: rs.Client,
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
evictor: &evictorImpl{
podEvictor: podEvictor,
evictorFilter: evictorFilter.(framework.EvictorPlugin),
},
}
// Assuming only a list of enabled extension points.
// Later, when a default list of plugins and their extension points is established,
// compute the list of enabled extension points as (DefaultEnabled + Enabled - Disabled)
for _, plugin := range append(profile.Plugins.Deschedule.Enabled, profile.Plugins.Balance.Enabled...) {
pc := getPluginConfig(plugin, profile.PluginConfigs)
if pc == nil {
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", plugin)
continue
}
registryPlugin, ok := pluginbuilder.PluginRegistry[plugin]
if !ok {
klog.ErrorS(fmt.Errorf("unable to find plugin in the pluginsMap"), "skipping plugin", "plugin", plugin)
continue
}
pgFnc := registryPlugin.PluginBuilder
pg, err := pgFnc(pc.Args, handle)
if err != nil {
klog.ErrorS(err, "unable to initialize a plugin", "pluginName", plugin)
}
if pg != nil {
switch v := pg.(type) {
case framework.DeschedulePlugin:
enabledDeschedulePlugins = append(enabledDeschedulePlugins, v)
case framework.BalancePlugin:
enabledBalancePlugins = append(enabledBalancePlugins, v)
default:
klog.ErrorS(fmt.Errorf("unknown plugin extension point"), "skipping plugin", "plugin", plugin)
}
}
}
}
// A new context is created here intentionally to avoid nesting the spans via the parent context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
// Execute extension points
for _, pg := range enabledDeschedulePlugins {
// TODO: strategyName should be accessible from within the strategy using a framework
// handle or function which the Evictor has access to. For migration/in-progress framework
// work, we are currently passing this via context. To be removed
// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
childCtx := context.WithValue(ctx, "strategyName", pg.Name())
status := pg.Deschedule(childCtx, nodes)
if status != nil && status.Err != nil {
klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
}
}
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
if err != nil {
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}
err = descheduler.runDeschedulerLoop(sCtx, nodes)
if err != nil {
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
cancel()
return
}
for _, pg := range enabledBalancePlugins {
// TODO: strategyName should be accessible from within the strategy using a framework
// handle or function which the Evictor has access to. For migration/in-progress framework
// work, we are currently passing this via context. To be removed
// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
childCtx := context.WithValue(ctx, "strategyName", pg.Name())
status := pg.Balance(childCtx, nodes)
if status != nil && status.Err != nil {
klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
// If there was no interval specified, cancel the context to end the wait.NonSlidingUntil loop after one iteration
if rs.DeschedulingInterval.Seconds() == 0 {
cancel()
@@ -692,32 +416,25 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
return nil
}
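// Illustrative sketch (not part of the descheduler source): the run-loop
// contract used above. wait.NonSlidingUntil re-invokes the body every
// DeschedulingInterval, and an interval of 0 turns the loop into a single
// pass by cancelling the context after the first iteration:
//
// wait.NonSlidingUntil(func() {
//     runOnce() // hypothetical single descheduling pass
//     if rs.DeschedulingInterval.Seconds() == 0 {
//         cancel()
//     }
// }, rs.DeschedulingInterval, ctx.Done())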
func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) *api.PluginConfig {
for _, pluginConfig := range pluginConfigs {
if pluginConfig.Name == pluginName {
return &pluginConfig
}
}
return nil
}
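// Illustrative sketch (not part of the descheduler source): callers treat a
// nil result as "plugin not configured in this profile" and skip it, as the
// profile loop above does:
//
// if pc := getPluginConfig(defaultevictor.PluginName, profile.PluginConfigs); pc == nil {
//     continue // nothing to build for this profile
// }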
func createClients(clientConnection componentbaseconfig.ClientConnectionConfiguration) (clientset.Interface, clientset.Interface, error) {
kClient, err := client.CreateClient(clientConnection, "descheduler")
if err != nil {
return nil, nil, err
}
eventClient, err := client.CreateClient(clientConnection, "")
if err != nil {
return nil, nil, err
}
return kClient, eventClient, nil
}
func trimManagedFields(obj interface{}) (interface{}, error) {
if accessor, err := meta.Accessor(obj); err == nil {
accessor.SetManagedFields(nil)
}
return obj, nil
}
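// Illustrative note (not part of the descheduler source): trimManagedFields is
// meant to be wired in as an informer transform, as done above, so the often
// sizeable managedFields metadata is dropped before objects enter the cache:
//
// factory := informers.NewSharedInformerFactoryWithOptions(
//     rs.Client, 0, informers.WithTransform(trimManagedFields))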
