Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 13:29:11 +01:00)

Compare commits


7 Commits

Author SHA1 Message Date
Kubernetes Prow Robot
9ed7f93abd Merge pull request #742 from damemi/ctx-cancel-backport
[release-1.23] Backport context cancel panic + Go version update
2022-02-28 10:55:46 -08:00
Antonio Gurgel
17128794ed Update golang image
1.17.3 is affected by CVE-2021-44716.
2022-02-28 18:43:36 +00:00
Mike Dame
09dcf88149 Update Helm chart for appVersion v0.23.1 2022-02-25 14:03:00 +00:00
Julian Lawrence
5e63610474 updated to handle cronjob flow 2022-02-25 14:01:16 +00:00
Kubernetes Prow Robot
6cdfab1aa9 Merge pull request #719 from JaneLiuL/release-1.23
[release-1.23] update docker image version to v0.23 and fix watch namespace issue
2022-02-09 05:34:18 -08:00
Jane Liu L
2e343a5202 update docker image version to v0.23 and fix watch namespace issue 2022-02-09 21:16:49 +08:00
Kubernetes Prow Robot
547dbcec65 Merge pull request #710 from damemi/update-helm-23
[release-1.23] Update helm chart version to v0.23
2022-02-03 12:22:58 -08:00
4105 changed files with 156365 additions and 426019 deletions

.gitattributes (6 lines changed)

@@ -1,6 +0,0 @@
# Always check-out / check-in files with LF line endings.
* text=auto eol=lf
go.sum merge=union
**/zz_generated.*.go linguist-generated=true
vendor/** linguist-vendored

.github/ci/ct.yaml (5 lines changed)

@@ -1,5 +0,0 @@
chart-dirs:
- charts
helm-extra-args: "--timeout=5m"
check-version-increment: false
target-branch: master


@@ -1,62 +0,0 @@
name: Helm
on:
push:
branches:
- master
- release-*
paths:
- 'charts/**'
- '.github/workflows/helm.yaml'
- '.github/ci/ct.yaml'
pull_request:
paths:
- 'charts/**'
- '.github/workflows/helm.yaml'
- '.github/ci/ct.yaml'
jobs:
lint-and-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v2.1
with:
version: v3.9.2
- uses: actions/setup-python@v3.1.2
with:
python-version: 3.7
- uses: actions/setup-go@v3
with:
go-version: '1.21.5'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
with:
version: v3.7.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: ct lint --config=.github/ci/ct.yaml --validate-maintainers=false
# Need a multi node cluster so descheduler runs until evictions
- name: Create multi node Kind cluster
run: make kind-multi-node
# helm-extra-set-args only available after ct 3.6.0
- name: Run chart-testing (install)
run: ct install --config=.github/ci/ct.yaml --helm-extra-set-args='--set=kind=Deployment'


@@ -1,42 +0,0 @@
name: manifests
on:
pull_request:
jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.29.0"]
descheduler-version: ["v0.29.0"]
descheduler-api: ["v1alpha1", "v1alpha2"]
manifest: ["deployment"]
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v3
- name: Create kind cluster
uses: helm/kind-action@v1.5.0
with:
node_image: kindest/node:${{ matrix.k8s-version }}
kubectl_version: ${{ matrix.k8s-version }}
config: test/kind-config.yaml
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: Build image
run: |
VERSION="dev" make dev-image
docker tag descheduler:dev registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }}
- name: Kind load image
run: |
kind load docker-image registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }} --name chart-testing
- name: Create k8s manifests
run: |
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f test/manifests/${{ matrix.descheduler-api }}/configmap.yaml
kubectl create -f kubernetes/${{ matrix.manifest }}/${{ matrix.manifest }}.yaml
- name: Wait for ready condition
run: |
kubectl wait --for=condition=Available --timeout=60s ${{ matrix.manifest }} descheduler -n kube-system


@@ -22,7 +22,7 @@ jobs:
- name: Install Helm
uses: azure/setup-helm@v1
with:
version: v3.7.0
version: v3.4.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.1.0


@@ -1,47 +0,0 @@
name: "Security"
on:
push:
branches:
- main
- master
- release-*
schedule:
- cron: '30 1 * * 0'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build image
run: |
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
IMAGE_TAG=${HELM_IMAGE_TAG:-security-test}
VERSION=security-test make image
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
image-ref: 'descheduler:security-test'
format: 'sarif'
exit-code: '0'
severity: 'CRITICAL,HIGH'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: 'trivy-results.sarif'
exit-code: '0'

.gitignore (3 lines changed)

@@ -4,5 +4,4 @@ vendordiff.patch
.idea/
*.code-workspace
.vscode/
kind
bin/
kind


@@ -1,18 +1,15 @@
run:
timeout: 2m
deadline: 2m
linters:
disable-all: true
enable:
- gofmt
- gofumpt
- gosimple
- gocyclo
- misspell
- govet
linters-settings:
gofumpt:
extra-rules: true
goimports:
local-prefixes: sigs.k8s.io/descheduler
local-prefixes: sigs.k8s.io/descheduler


@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.21.5
FROM golang:1.17.7
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .


@@ -14,10 +14,8 @@
.PHONY: test
export CONTAINER_ENGINE ?= docker
# VERSION is based on a date stamp plus the last commit
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags)
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags --match "v*")
BRANCH?=$(shell git branch --show-current)
SHA1?=$(shell git rev-parse HEAD)
BUILD=$(shell date +%FT%T%z)
@@ -26,12 +24,9 @@ ARCHS = amd64 arm arm64
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
GOLANGCI_VERSION := v1.55.2
GOLANGCI_VERSION := v1.43.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
GOFUMPT_VERSION := v0.4.0
HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
# REGISTRY is the container registry to push
# into. The default is to push to the staging
# registry, not production.
@@ -43,10 +38,6 @@ IMAGE:=descheduler:$(VERSION)
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
# CURRENT_DIR is the current dir where the Makefile exists
CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
@@ -69,42 +60,42 @@ build.arm64:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
dev-image: build
$(CONTAINER_ENGINE) build -f Dockerfile.dev -t $(IMAGE) .
docker build -f Dockerfile.dev -t $(IMAGE) .
image:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
image.amd64:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
image.arm:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
image.arm64:
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
docker build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
push: image
gcloud auth configure-docker
$(CONTAINER_ENGINE) tag $(IMAGE) $(IMAGE_GCLOUD)
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)
docker tag $(IMAGE) $(IMAGE_GCLOUD)
docker push $(IMAGE_GCLOUD)
push-all: image.amd64 image.arm image.arm64
gcloud auth configure-docker
for arch in $(ARCHS); do \
$(CONTAINER_ENGINE) tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)-$${arch} ;\
docker tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
docker push $(IMAGE_GCLOUD)-$${arch} ;\
done
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
for arch in $(ARCHS); do \
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
done
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest push $(IMAGE_GCLOUD) ;\
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(IMAGE_GCLOUD) ;\
clean:
rm -rf _output
rm -rf _tmp
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-gen
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen
verify-govet:
./hack/verify-govet.sh
@@ -118,8 +109,8 @@ verify-gofmt:
verify-vendor:
./hack/verify-vendor.sh
verify-docs:
./hack/verify-docs.sh
verify-toc:
./hack/verify-toc.sh
test-unit:
./test/run-unit-tests.sh
@@ -131,16 +122,12 @@ gen:
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
./hack/update-docs.sh
gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:1.21.5 gen
./hack/update-toc.sh
verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh
./hack/verify-defaulters.sh
./hack/verify-docs.sh
lint:
ifndef HAS_GOLANGCI
@@ -148,30 +135,13 @@ ifndef HAS_GOLANGCI
endif
./_output/bin/golangci-lint run
fmt:
ifndef HAS_GOFUMPT
go install mvdan.cc/gofumpt@${GOFUMPT_VERSION}
endif
gofumpt -w -extra .
# helm
ensure-helm-install:
ifndef HAS_HELM
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif
lint-chart: ensure-helm-install
helm lint ./charts/descheduler
build-helm:
helm package ./charts/descheduler --dependency-update --destination ./bin/chart
test-helm: ensure-helm-install
./test/run-helm-tests.sh
kind-multi-node:
kind create cluster --name kind --config ./hack/kind_config.yaml --wait 2m
ct-helm:
./hack/verify-chart.sh
ensure-helm-install:
ifndef HAS_HELM
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif

OWNERS (9 lines changed)

@@ -2,17 +2,14 @@ approvers:
- damemi
- ingvagabund
- seanmalloy
- a7i
- knelasevero
reviewers:
- aveshagarwal
- k82cn
- ravisantoshgudimetla
- damemi
- seanmalloy
- ingvagabund
- lixiang233
- a7i
- janeliul
- knelasevero
- jklaw90
emeritus_approvers:
- aveshagarwal
- k82cn

README.md (749 lines changed; diff suppressed because it is too large)

Binary image file changed (not shown; previous size: 41 KiB).


@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.29.0
appVersion: 0.29.0
version: 0.23.2
appVersion: 0.23.1
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes


@@ -43,48 +43,30 @@ The command removes all the Kubernetes components associated with the chart and
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
| Parameter | Description | Default |
| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
| Parameter | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
| `tolerations` | tolerations to run the descheduler cronjob on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
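
To see how these parameters fit together, here is a minimal, hypothetical override file (the file name `my-values.yaml` and the specific values are illustrative, not recommendations):

```yaml
# my-values.yaml: hypothetical overrides for the descheduler chart
kind: CronJob
schedule: "*/10 * * * *"   # run every 10 minutes instead of the default 2
cmdOptions:
  v: 4                     # raise log verbosity
deschedulerPolicy:
  strategies:
    RemoveDuplicates:
      enabled: true
```

Such a file would be passed to Helm with `-f my-values.yaml` at install or upgrade time.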


@@ -1,12 +1 @@
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.replicas 1.0}}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}
Descheduler installed as a {{ .Values.kind }} .


@@ -65,30 +65,3 @@ Create the name of the service account to use
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{- if .Values.leaderElection.resourceNamescape }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
{{- end -}}
{{- end }}
{{- end }}
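
The helper above emits a `--leader-elect*` flag only for the fields that are actually set. As a sketch, assuming the `leaderElection` values layout used elsewhere in this chart, the following input would render the flags shown in the trailing comments:

```yaml
# Hypothetical leaderElection values:
leaderElection:
  enabled: true
  leaseDuration: 15s
  resourceName: descheduler
# Flags the helper would render (illustrative):
#   --leader-elect=true
#   --leader-elect-lease-duration=15s
#   --leader-elect-resource-name=descheduler
```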


@@ -6,7 +6,7 @@ metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
rules:
- apiGroups: ["events.k8s.io"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
@@ -24,13 +24,11 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- if .Values.podSecurityPolicy.create }}
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "descheduler.fullname" . }}
{{- end }}
{{- end -}}


@@ -2,11 +2,10 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}


@@ -3,7 +3,6 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
@@ -21,14 +20,8 @@ spec:
{{- if .Values.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
@@ -51,10 +44,6 @@ spec:
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
@@ -73,25 +62,31 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 16 }}
- "/bin/descheduler"
args:
- --policy-config-file=/policy-dir/policy.yaml
- "--policy-config-file"
- "/policy-dir/policy.yaml"
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 16 }}
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
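
Note how the two template versions render `cmdOptions` differently: the newer loop emits a single `--key=value` token (or a bare `--key` when the value is empty), while the older one quotes the flag and its value as separate args. A sketch with a hypothetical options map:

```yaml
# Hypothetical cmdOptions map:
cmdOptions:
  v: 3
  dry-run:   # empty value, so the flag is rendered alone
# Newer template renders:
#   - --v=3
#   - --dry-run
# Older template renders:
#   - "--v"
#   - "3"
#   - "--dry-run"
```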


@@ -3,18 +3,10 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt .Values.replicas 1.0}}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
{{- else }}
replicas: 1
{{- end }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
@@ -31,31 +23,27 @@ spec:
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 12 }}
- "/bin/descheduler"
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key }}{{ if $value }}={{ $value }}{{ end }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
- containerPort: 10258
protocol: TCP
@@ -64,14 +52,16 @@ spec:
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
volumes:
- name: policy-volume
configMap:


@@ -0,0 +1,38 @@
{{- if .Values.podSecurityPolicy.create -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "descheduler.fullname" . }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'configMap'
- 'secret'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1
max: 65535
readOnlyRootFilesystem: true
{{- end -}}


@@ -1,27 +0,0 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
{{- end }}
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
{{- include "descheduler.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
{{- end }}


@@ -3,7 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}


@@ -1,44 +0,0 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "descheduler.fullname" . }}-servicemonitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
endpoints:
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
scheme: https
tlsConfig:
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
insecureSkipVerify: true
{{- end }}
{{- if .Values.serviceMonitor.serverName }}
serverName: {{ .Values.serviceMonitor.serverName }}
{{- end}}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
{{- end }}
{{- end }}
{{- end }}


@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
name: descheduler-test-pod
annotations:
"helm.sh/hook": test
spec:
restartPolicy: Never
serviceAccountName: descheduler-ci
containers:
- name: descheduler-test-container
image: alpine:latest
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- All
privileged: false
runAsNonRoot: false
command: ["/bin/ash"]
args:
- -c
- >-
apk --no-cache add curl &&
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&
chmod +x ./kubectl &&
mv ./kubectl /usr/local/bin/kubectl &&
/usr/local/bin/kubectl get pods --namespace kube-system --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"


@@ -6,13 +6,12 @@
kind: CronJob
image:
repository: registry.k8s.io/descheduler/descheduler
repository: k8s.gcr.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
tag: ""
pullPolicy: IfNotPresent
imagePullSecrets:
# - name: container-registry-secret
imagePullSecrets: []
resources:
requests:
@@ -22,87 +21,32 @@ resources:
# cpu: 100m
# memory: 128Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000
nameOverride: ""
fullnameOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1"
cronJobApiVersion: "batch/v1" # Use "batch/v1beta1" for k8s version < 1.21.0. TODO(@7i) remove with 1.23 release
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# timeZone: Etc/UTC
#startingDeadlineSeconds: 200
#successfulJobsHistoryLimit: 1
#failedJobsHistoryLimit: 1
# Required when running as a Deployment
deschedulingInterval: 5m
# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1
# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election can't be activated if DryRun enabled
leaderElection: {}
# enabled: true
# leaseDuration: 15s
# renewDeadline: 10s
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamescape: "kube-system"
command:
- "/bin/descheduler"
cmdOptions:
v: 3
# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha1"
# evict-local-storage-pods:
# max-pods-to-evict-per-node: 10
# node-selector: "key1=value1,key2=value2"
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# ignorePvcPods: true
# evictLocalStoragePods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
strategies:
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingNodeAffinity:
@@ -112,10 +56,6 @@ deschedulerPolicy:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
LowNodeUtilization:
enabled: true
params:
@@ -144,15 +84,7 @@ affinity: {}
# values:
# - e2e-az1
# - e2e-az2
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
tolerations: []
# - key: 'management'
# operator: 'Equal'
@@ -163,6 +95,10 @@ rbac:
# Specifies whether RBAC resources should be created
create: true
podSecurityPolicy:
# Specifies whether PodSecurityPolicy should be created.
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
@@ -172,12 +108,6 @@ serviceAccount:
# Specifies custom annotations for the serviceAccount
annotations: {}
podAnnotations: {}
podLabels: {}
dnsConfig: {}
livenessProbe:
failureThreshold: 3
httpGet:
@@ -187,38 +117,3 @@ livenessProbe:
initialDelaySeconds: 3
periodSeconds: 10
service:
enabled: false
# @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
#
ipFamilyPolicy: ""
# @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
# E.g.
# ipFamilies:
# - IPv6
# - IPv4
ipFamilies: []
serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true
serverName: null
metricRelabelings: []
# - action: keep
# regex: 'descheduler_(build_info|pods_evicted)'
# sourceLabels: [__name__]
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
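
Several of these values only take effect together when the chart runs as a Deployment. A hypothetical override combining them (key names taken from the values file above) might look like:

```yaml
# Hypothetical Deployment-mode overrides:
kind: Deployment
deschedulingInterval: 5m
replicas: 2
leaderElection:
  enabled: true     # the deployment template requires this when replicas > 1
service:
  enabled: true     # exposes the metrics port (10258)
serviceMonitor:
  enabled: true     # assumes the Prometheus Operator CRDs are installed
```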


@@ -1,7 +1,7 @@
# See https://cloud.google.com/cloud-build/docs/build-config
# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
timeout: 3600s
timeout: 1200s
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:


@@ -18,18 +18,14 @@ limitations under the License.
package options
import (
"time"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/tracing"
)
const (
@@ -41,10 +37,8 @@ type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration
Client clientset.Interface
EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
EnableHTTP2 bool
}
// NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -64,21 +58,9 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
}
func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
versionedCfg := v1alpha1.DeschedulerConfiguration{
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
RetryPeriod: metav1.Duration{Duration: 26 * time.Second},
ResourceLock: "leases",
ResourceName: "descheduler",
ResourceNamespace: "kube-system",
},
}
versionedCfg := v1alpha1.DeschedulerConfiguration{}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{
Tracing: componentconfig.TracingConfiguration{},
}
cfg := componentconfig.DeschedulerConfiguration{}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
@@ -87,23 +69,12 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
fs.Float32Var(&rs.ClientConnection.QPS, "client-connection-qps", rs.ClientConnection.QPS, "QPS to use for interacting with kubernetes apiserver.")
fs.Int32Var(&rs.ClientConnection.Burst, "client-connection-burst", rs.ClientConnection.Burst, "Burst to use for interacting with kubernetes apiserver.")
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
rs.SecureServing.AddFlags(fs)
}


@@ -27,19 +27,15 @@ import (
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/tracing"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs"
logsapi "k8s.io/component-base/logs/api/v1"
"k8s.io/component-base/config"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/logs/registry"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
)
@@ -47,35 +43,34 @@ import (
// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
s, err := options.NewDeschedulerServer()
if err != nil {
klog.ErrorS(err, "unable to initialize server")
}
featureGate := featuregate.NewFeatureGate()
logConfig := logsapi.NewLoggingConfiguration()
cmd := &cobra.Command{
Use: "descheduler",
Short: "descheduler",
Long: "The descheduler evicts pods which may be bound to less desired nodes",
PreRunE: func(cmd *cobra.Command, args []string) error {
logs.InitLogs()
if err := logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
return err
}
descheduler.SetupPlugins()
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
// loopbackClientConfig is a config for a privileged loopback connection
var loopbackClientConfig *restclient.Config
var secureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
// s.Logs.Config.Format = s.Logging.Format
// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return err
return
}
secureServing.DisableHTTP2 = !s.EnableHTTP2
factory, _ := registry.LogRegistry.Get(s.Logging.Format)
if factory == nil {
klog.ClearLogger()
} else {
log, logrFlush := factory.Create(config.FormatOptions{})
defer logrFlush()
klog.SetLogger(log)
}
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
@@ -86,42 +81,28 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
stoppedCh, _, err := secureServing.Serve(pathRecorderMux, 0, ctx.Done())
stoppedCh, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
return
}
if err = Run(ctx, s); err != nil {
err = Run(ctx, s)
if err != nil {
klog.ErrorS(err, "descheduler server")
return err
}
done()
// wait for metrics server to close
<-stoppedCh
return nil
},
}
cmd.SetOut(out)
flags := cmd.Flags()
s.AddFlags(flags)
runtime.Must(logsapi.AddFeatureGates(featureGate))
logsapi.AddFlags(logConfig, flags)
return cmd
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
err := tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
return err
}
defer tracing.Shutdown(ctx)
// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
watch.DefaultChanSize = 100000
return descheduler.Run(ctx, rs)
}


@@ -24,7 +24,7 @@ import (
)
func NewVersionCommand() *cobra.Command {
versionCmd := &cobra.Command{
var versionCmd = &cobra.Command{
Use: "version",
Short: "Version of descheduler",
Long: `Prints the version of descheduler.`,


@@ -20,9 +20,15 @@ import (
"os"
"k8s.io/component-base/cli"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
func init() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}
func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)


@@ -1,63 +0,0 @@
## descheduler
descheduler
### Synopsis
The descheduler evicts pods which may be bound to less desired nodes
```
descheduler [flags]
```
### Options
```
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
--client-connection-burst int32 Burst to use for interacting with kubernetes apiserver.
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
--client-connection-qps float32 QPS to use for interacting with kubernetes apiserver.
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
--dry-run Execute descheduler in dry run mode.
--enable-http2 If http/2 should be enabled for the metrics and health check
-h, --help help for descheduler
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--log-json-info-buffer-size quantity [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
--log-json-split-stream [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
--logging-format string Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
--otel-sample-rate float Sample rate to collect the Traces (default 1)
--otel-service-name string OTEL Trace name to be used with the resources (default "descheduler")
--otel-trace-namespace string OTEL Trace namespace to be used with the resources
--otel-transport-ca-cert string Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
--policy-config-file string File with descheduler policy configuration.
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
-v, --v Level number for the log level verbosity
--vmodule pattern=N,... comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
```
### SEE ALSO
* [descheduler version](descheduler_version.md) - Version of descheduler


@@ -1,22 +0,0 @@
## descheduler version
Version of descheduler
### Synopsis
Prints the version of descheduler.
```
descheduler version [flags]
```
### Options
```
-h, --help help for version
```
### SEE ALSO
* [descheduler](descheduler.md) - descheduler


@@ -20,7 +20,7 @@ make
Run descheduler.
```sh
./_output/bin/descheduler --client-connection-kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
```
View all CLI options.
@@ -31,7 +31,7 @@ View all CLI options.
## Run Tests
```
GOOS=linux make dev-image
make kind-multi-node
kind create cluster --config hack/kind_config.yaml
kind load docker-image <image name>
kind get kubeconfig > /tmp/admin.conf
export KUBECONFIG=/tmp/admin.conf
@@ -39,38 +39,17 @@ make test-unit
make test-e2e
```
## Format Code
After making changes in the code base, ensure that the code is formatted correctly:
## Run Helm Tests
Run the helm tests for a particular descheduler release by setting the variables below:
```
HELM_IMAGE_REPO="descheduler"
HELM_IMAGE_TAG="helm-test"
HELM_CHART_LOCATION="./charts/descheduler"
```
The helm tests run as part of the descheduler CI. To run them manually from the descheduler root:
```
make fmt
```
## Build Helm Package locally
If you have made changes to the chart and just want to check that it templates and packages correctly, run this command to build a package from the `./charts` directory.
```
make build-helm
```
## Lint Helm Chart locally
To check linting of your changes in the helm chart locally you can run:
```
make lint-chart
```
## Test helm changes locally with kind and ct
You will need kind and docker (or an equivalent) installed. You can use the public ct image to avoid installing ct and all its dependencies.
```
make kind-multi-node
make ct-helm
make test-helm
```
### Miscellaneous


@@ -1,784 +0,0 @@
[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
<p align="center">
<img src="assets/logo/descheduler-stacked-color.png" width="40%" align="center" alt="descheduler">
</p>
# Descheduler for Kubernetes
Scheduling in Kubernetes is the process of binding pending pods to nodes, and is performed by
a component of Kubernetes called kube-scheduler. The scheduler's decisions, whether or where a
pod can or cannot be scheduled, are guided by its configurable policy, which comprises a set of
rules called predicates and priorities. The scheduler's decisions are influenced by its view of
the Kubernetes cluster at the point in time when a new pod appears for scheduling.
As Kubernetes clusters are very dynamic and their state changes over time, there may be a desire
to move already-running pods to other nodes for various reasons:
* Some nodes are under or over utilized.
* The original scheduling decision no longer holds true, as taints or labels have been added to
or removed from nodes, and pod/node affinity requirements are no longer satisfied.
* Some nodes failed and their pods moved to other nodes.
* New nodes are added to clusters.
Consequently, there might be several pods scheduled on less desired nodes in a cluster.
The descheduler, based on its policy, finds pods that can be moved and evicts them. Please
note that, in the current implementation, the descheduler does not schedule replacements for
evicted pods but relies on the default scheduler for that.
Table of Contents
=================
<!-- toc -->
- [Quick Start](#quick-start)
- [Run As A Job](#run-as-a-job)
- [Run As A CronJob](#run-as-a-cronjob)
- [Run As A Deployment](#run-as-a-deployment)
- [Install Using Helm](#install-using-helm)
- [Install Using Kustomize](#install-using-kustomize)
- [User Guide](#user-guide)
- [Policy and Strategies](#policy-and-strategies)
- [RemoveDuplicates](#removeduplicates)
- [LowNodeUtilization](#lownodeutilization)
- [HighNodeUtilization](#highnodeutilization)
- [RemovePodsViolatingInterPodAntiAffinity](#removepodsviolatinginterpodantiaffinity)
- [RemovePodsViolatingNodeAffinity](#removepodsviolatingnodeaffinity)
- [RemovePodsViolatingNodeTaints](#removepodsviolatingnodetaints)
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
- [PodLifeTime](#podlifetime)
- [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
- [Namespace filtering](#namespace-filtering)
- [Priority filtering](#priority-filtering)
- [Label filtering](#label-filtering)
- [Node Fit filtering](#node-fit-filtering)
- [Pod Evictions](#pod-evictions)
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
- [High Availability](#high-availability)
- [Configure HA Mode](#configure-ha-mode)
- [Metrics](#metrics)
- [Compatibility Matrix](#compatibility-matrix)
- [Getting Involved and Contributing](#getting-involved-and-contributing)
- [Communicating With Contributors](#communicating-with-contributors)
- [Roadmap](#roadmap)
- [Code of conduct](#code-of-conduct)
<!-- /toc -->
## Quick Start
The descheduler can be run as a `Job`, `CronJob`, or `Deployment` inside of a k8s cluster, which
lets it run repeatedly without user intervention. The descheduler pod runs as a critical pod in
the `kube-system` namespace to avoid being evicted by itself or by the kubelet.
### Run As A Job
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/job/job.yaml
```
### Run As A CronJob
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/cronjob/cronjob.yaml
```
### Run As A Deployment
```
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f kubernetes/base/configmap.yaml
kubectl create -f kubernetes/deployment/deployment.yaml
```
### Install Using Helm
Starting with release v0.18.0 there is an official helm chart that can be used to install the
descheduler. See the [helm chart README](https://github.com/kubernetes-sigs/descheduler/blob/master/charts/descheduler/README.md) for detailed instructions.
The descheduler helm chart is also listed on the [artifact hub](https://artifacthub.io/packages/helm/descheduler/descheduler).
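For a quick start, the install typically looks like the following; the repo URL and chart name here follow the chart README, so verify them there for your release:
```
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install descheduler --namespace kube-system descheduler/descheduler
```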
### Install Using Kustomize
You can use kustomize to install descheduler.
See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/kustomize/cmd/build/) for detailed instructions.
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.29.0' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.29.0' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.29.0' | kubectl apply -f -
```
## User Guide
See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy and Strategies
Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
The policy includes a common configuration that applies to all the strategies:
| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limits the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
| `evictFailedBarePods` | `false` | allows eviction of pods without owner references that are in the failed phase |
As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.
**Policy:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
ignorePvcPods: false
strategies:
...
```
The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.
![Strategies diagram](strategies_diagram.png)
### RemoveDuplicates
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
ReplicationController (RC), StatefulSet, or Job running on the same node. If there are more,
those duplicate pods are evicted for better spreading of pods in a cluster. This issue could happen
if some nodes went down for whatever reason and their pods were moved to other nodes, leading to
more than one pod associated with an RS or RC, for example, running on the same node. Once the failed nodes
are ready again, this strategy could be enabled to evict those duplicate pods.
It provides one optional parameter, `excludeOwnerKinds`, which is a list of OwnerRef `Kind`s. If a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction. Note that
pods created by Deployments are considered for eviction by this strategy. The `excludeOwnerKinds` parameter
should include `ReplicaSet` to have pods created by Deployments excluded.
**Parameters:**
|Name|Type|
|---|---|
|`excludeOwnerKinds`|list(string)|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveDuplicates":
enabled: true
params:
removeDuplicates:
excludeOwnerKinds:
- "ReplicaSet"
```
### LowNodeUtilization
This strategy finds nodes that are under utilized and evicts pods, if possible, from other nodes
in the hope that recreation of evicted pods will be scheduled on these underutilized nodes. The
parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold, `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.
If a node's usage is below the threshold for all resources (cpu, memory, number of pods, and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered when computing node resource utilization.
There is another configurable threshold, `targetThresholds`, that is used to compute those potential nodes
from which pods could be evicted. If a node's usage is above `targetThresholds` for any resource (cpu, memory, number of pods, or extended resources),
the node is considered over utilized. Any node between `thresholds` and `targetThresholds` is
considered appropriately utilized and is not considered for eviction. The `targetThresholds` threshold
can also be configured for cpu, memory, and number of pods in terms of percentage.
These thresholds, `thresholds` and `targetThresholds`, could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
(those with usage below `thresholds`); it will abort if the number of `underutilized nodes` or `overutilized nodes` is zero.
Additionally, the strategy accepts a `useDeviationThresholds` parameter.
If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
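For example, a minimal sketch (threshold values are illustrative) where nodes more than 10 percentage points below the mean usage are underutilized and nodes more than 10 points above it are overutilized:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        useDeviationThresholds: true
        thresholds:        # deducted from the mean usage across nodes
          "cpu": 10
          "memory": 10
        targetThresholds:  # added to the mean usage across nodes
          "cpu": 10
          "memory": 10
```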
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently a TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`targetThresholds`|map(string:int)|
|`numberOfNodes`|int|
|`useDeviationThresholds`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
```
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`.
If any of these resource types is not specified, all its thresholds default to 100% to avoid nodes going from underutilized to overutilized.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional,
and will not be used to compute a node's usage unless they are specified in both `thresholds` and `targetThresholds` explicitly.
* Neither `thresholds` nor `targetThresholds` can be nil, and they must configure exactly the same types of resources.
* The valid range of the resource's percentage value is \[0, 100\]
* Percentage value of `thresholds` can not be greater than `targetThresholds` for the same resource.
There is another parameter associated with the `LowNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of underutilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
underutilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
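As a sketch (the value 3 is illustrative), the strategy below only activates once at least three nodes are underutilized:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        numberOfNodes: 3   # skip the run unless 3+ nodes are underutilized
        thresholds:
          "cpu": 20
        targetThresholds:
          "cpu": 50
```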
### HighNodeUtilization
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down-scaling of underutilized nodes.
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.
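For reference, a sketch of a matching kube-scheduler configuration; the config API group version (`v1` below) varies by Kubernetes release, so check the scheduler configuration docs for yours:
```yaml
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
profiles:
  - schedulerName: default-scheduler
    pluginConfig:
      - name: NodeResourcesFit
        args:
          scoringStrategy:
            type: MostAllocated   # score nodes higher as they fill up, packing pods together
            resources:
              - name: cpu
                weight: 1
              - name: memory
                weight: 1
```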
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
calculated as the current resources requested on the node vs [total allocatable](https://kubernetes.io/docs/concepts/architecture/nodes/#capacity).
For pods, this means the number of pods on the node as a fraction of the pod capacity set for that node.
If a node's usage is below the threshold for all resources (cpu, memory, number of pods, and extended resources), the node is considered underutilized.
Currently, pods' resource requests are considered when computing node resource utilization.
Any node above `thresholds` is considered appropriately utilized and is not considered for eviction.
The `thresholds` param could be tuned as per your cluster requirements. Note that this
strategy evicts pods from `underutilized nodes` (those with usage below `thresholds`)
so that they can be recreated in appropriately utilized nodes.
The strategy will abort if the number of `underutilized nodes` or `appropriately utilized nodes` is zero.
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
actual usage metrics. Implementing metrics-based descheduling is currently a TODO for the project.
**Parameters:**
|Name|Type|
|---|---|
|`thresholds`|map(string:int)|
|`numberOfNodes`|int|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|`Namespaces`|(see [namespace filtering](#namespace-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
```
Policy should pass the following validation checks:
* Three basic native types of resources are supported: `cpu`, `memory` and `pods`. If any of these resource types is not specified, all its thresholds default to 100%.
* Extended resources are supported. For example, resource type `nvidia.com/gpu` is specified for GPU node utilization. Extended resources are optional, and will not be used to compute a node's usage unless they are specified in `thresholds` explicitly.
* `thresholds` cannot be nil.
* The valid range of the resource's percentage value is \[0, 100\]
There is another parameter associated with the `HighNodeUtilization` strategy, called `numberOfNodes`.
This parameter can be configured to activate the strategy only when the number of underutilized nodes
is above the configured value. This could be helpful in large clusters where a few nodes could go
underutilized frequently or for a short period of time. By default, `numberOfNodes` is set to zero.
### RemovePodsViolatingInterPodAntiAffinity
This strategy makes sure that pods violating interpod anti-affinity are removed from nodes. For example,
if there is podA on a node, and podB and podC (running on the same node) have anti-affinity rules which prohibit
them from running on the same node, then podA will be evicted from the node so that podB and podC can run. This
issue can happen when the anti-affinity rules for podB and podC are created while they are already running on
the node.
**Parameters:**
|Name|Type|
|---|---|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
```
### RemovePodsViolatingNodeAffinity
This strategy makes sure all pods violating
[node affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)
are eventually removed from nodes. Node affinity rules allow a pod to specify the
`requiredDuringSchedulingIgnoredDuringExecution` type, which tells the scheduler
to respect node affinity when scheduling the pod but tells the kubelet to ignore it
in case the node changes over time and no longer satisfies the affinity.
When enabled, the strategy serves as a temporary implementation
of `requiredDuringSchedulingRequiredDuringExecution`, evicting pods from nodes
that no longer satisfy their node affinity.
For example, podA is scheduled on nodeA, which satisfies the node
affinity rule `requiredDuringSchedulingIgnoredDuringExecution` at the time
of scheduling. Over time, nodeA stops satisfying the rule. When the strategy is
executed and another node is available that satisfies the node affinity rule,
podA gets evicted from nodeA.
**Parameters:**
|Name|Type|
|---|---|
|`nodeAffinityType`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingNodeAffinity":
enabled: true
params:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"
```
### RemovePodsViolatingNodeTaints
This strategy makes sure that pods violating NoSchedule taints on nodes are removed. For example, a
pod "podA" with a toleration for the taint ``key=value:NoSchedule`` is scheduled and running on the tainted
node. If the node's taint is subsequently updated or removed, the toleration no longer matches a taint,
and the pod will be evicted.
Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
key=value matches an excludedTaints entry, the taint will be ignored.
For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
**Parameters:**
|Name|Type|
|---|---|
|`excludedTaints`|list(string)|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
````yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingNodeTaints":
enabled: true
params:
excludedTaints:
- dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
- reserved # exclude all taints with key "reserved"
````
### RemovePodsViolatingTopologySpreadConstraint
This strategy makes sure that pods violating [topology spread constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)
are evicted from nodes. Specifically, it tries to evict the minimum number of pods required to balance topology domains to within each constraint's `maxSkew`.
This strategy requires k8s version 1.18 at a minimum.
By default, this strategy only deals with hard constraints; setting the parameter `includeSoftConstraints` to `true` will
also include soft constraints.
Strategy parameter `labelSelector` is not utilized when balancing topology domains and is only applied during eviction to determine if the pod can be evicted.
**Parameters:**
|Name|Type|
|---|---|
|`includeSoftConstraints`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: false
```
### RemovePodsHavingTooManyRestarts
This strategy makes sure that pods having too many restarts are removed from nodes. For example, a pod whose EBS/PD
volume cannot be attached to its instance should be rescheduled to another node. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.
**Parameters:**
|Name|Type|
|---|---|
|`podRestartThreshold`|int|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsHavingTooManyRestarts":
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
```
### PodLifeTime
This strategy evicts pods that are older than `maxPodLifeTimeSeconds`.
You can also specify the `states` parameter to **only** evict pods matching the following conditions:
- [Pod Phase](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase) status of: `Running`, `Pending`
- [Container State Waiting](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-state-waiting) condition of: `PodInitializing`, `ContainerCreating`
If a value for `states` or `podStatusPhases` is not specified,
Pods in any state (even `Running`) are considered for eviction.
**Parameters:**
|Name|Type|Notes|
|---|---|---|
|`maxPodLifeTimeSeconds`|int||
|`podStatusPhases`|list(string)|Deprecated in v0.25+ Use `states` instead|
|`states`|list(string)|Only supported in v0.25+|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))||
|`namespaces`|(see [namespace filtering](#namespace-filtering))||
|`labelSelector`|(see [label filtering](#label-filtering))||
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
states:
- "Pending"
- "PodInitializing"
```
### RemoveFailedPods
This strategy evicts pods that are in the failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to only evict pods that are older than the specified number of seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds`; if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
**Parameters:**
|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "NodeAffinity"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
```
## Filter Pods
### Namespace filtering
The following strategies accept a `namespaces` parameter, which allows specifying a list of namespaces to include or, respectively, exclude:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
* `LowNodeUtilization` and `HighNodeUtilization` (Only filtered right before eviction)
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
namespaces:
include:
- "namespace1"
- "namespace2"
```
In this example, `PodLifeTime` is executed only over `namespace1` and `namespace2`.
The same holds for the `exclude` field:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
namespaces:
exclude:
- "namespace1"
- "namespace2"
```
The strategy is executed over all namespaces except `namespace1` and `namespace2`.
It is not allowed to combine the `include` and `exclude` fields.
### Priority filtering
All strategies are able to configure a priority threshold; only pods under the threshold can be evicted. You can
specify this threshold by setting the `thresholdPriorityClassName` (setting the threshold to the value of the given
priority class) or `thresholdPriority` (directly setting the threshold) parameter. By default, this threshold
is set to the value of the `system-cluster-critical` priority class.
Note: Setting `evictSystemCriticalPods` to true disables priority filtering entirely.
E.g.
Setting `thresholdPriority`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
thresholdPriority: 10000
```
Setting `thresholdPriorityClassName`
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
thresholdPriorityClassName: "priorityclass1"
```
Note that you can't configure both `thresholdPriority` and `thresholdPriorityClassName`. If the given priority class
does not exist, the descheduler won't create it and will throw an error.
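For reference, a minimal sketch of a priority class like the `priorityclass1` used above (the name and value are illustrative); it must exist before the descheduler references it:
```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: priorityclass1   # illustrative name; referenced by thresholdPriorityClassName
value: 10000             # pods with priority at or above this value are protected from eviction
globalDefault: false
description: "Eviction threshold for the descheduler."
```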
### Label filtering
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.26/#labelselector-v1-meta)
to filter pods by their labels:
* `PodLifeTime`
* `RemovePodsHavingTooManyRestarts`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
This allows a strategy to run only against the pods the descheduler is interested in.
For example:
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 86400
labelSelector:
matchLabels:
component: redis
matchExpressions:
- {key: tier, operator: In, values: [cache]}
- {key: environment, operator: NotIn, values: [dev]}
```
### Node Fit filtering
The following strategies accept a `nodeFit` boolean parameter which can optimize descheduling:
* `RemoveDuplicates`
* `LowNodeUtilization`
* `HighNodeUtilization`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`
If set to `true`, the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently, the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
- Any `tolerations` on the pod and any `taints` on the other nodes
- `nodeAffinity` on the pod
- Resource `requests` made by the pod and the resources available on other nodes
- Whether any of the other nodes are marked as `unschedulable`
E.g.
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeFit: true
nodeResourceUtilizationThresholds:
thresholds:
"cpu": 20
"memory": 20
"pods": 20
targetThresholds:
"cpu": 50
"memory": 50
"pods": 50
```
Note that node fit filtering references the current pod spec, and not that of its owner.
Thus, if the pod is owned by a ReplicationController (and that ReplicationController was modified recently),
the pod may be running with an outdated spec, which the descheduler will reference when determining node fit.
This is expected behavior as the descheduler is a "best-effort" mechanism.
Using Deployments instead of ReplicationControllers provides an automated rollout of pod spec changes, therefore ensuring that the descheduler has an up-to-date view of the cluster state.


@@ -1,16 +0,0 @@
# Proposals
This document walks you through all the enhancement proposals for the descheduler.
## Descheduler v1alpha2 Design Proposal
```yaml
title: Descheduler v1alpha2 Design Proposal
authors:
- "@damemi"
link:
- https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
- https://github.com/kubernetes-sigs/descheduler/issues/679
owning-sig: sig-scheduling
creation-date: 2021-05-01
status: implementable
```


@@ -1,82 +1,36 @@
# Release Guide
The process for publishing each Descheduler release includes a mixture of manual and automatic steps. Over
time, it would be good to automate as much of this process as possible; for now, care
must be taken to perform each manual step precisely so that the automated steps execute properly.
## Container Image
## Pre-release Code Changes
### Semi-automatic
Before publishing each release, the following code updates must be made:
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
- [ ] (Optional, but recommended) Bump `k8s.io` dependencies to the `-rc` tags. These tags are usually published around upstream code freeze. [Example](https://github.com/kubernetes-sigs/descheduler/pull/539)
- [ ] Bump `k8s.io` dependencies to GA tags once they are published (following the upstream release). [Example](https://github.com/kubernetes-sigs/descheduler/pull/615)
- [ ] Ensure that Go is updated to the same version as upstream. [Example](https://github.com/kubernetes-sigs/descheduler/pull/801)
- [ ] Make CI changes in [github.com/kubernetes/test-infra](https://github.com/kubernetes/test-infra) to add the new version's tests (note, this may also include a Go bump). [Example](https://github.com/kubernetes/test-infra/pull/25833)
- [ ] Update local CI versions for utils (such as golang-ci), kind, and go. [Example - e2e](https://github.com/kubernetes-sigs/descheduler/commit/ac4d576df8831c0c399ee8fff1e85469e90b8c44), [Example - helm](https://github.com/kubernetes-sigs/descheduler/pull/821)
- [ ] Update version references in docs and Readme. [Example](https://github.com/kubernetes-sigs/descheduler/pull/617)
### Manual
## Release Process
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
7. Publish a draft release using the tag you just created
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
9. Publish release
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
When the above pre-release steps are complete and the release is ready to be cut, perform the following steps **in order**
(the flowchart below demonstrates these steps):
**Version release**
1. Create the `git tag` on `master` for the release, eg `v0.24.0`
2. Merge Helm chart version update to `master` (see [Helm chart](#helm-chart) below). [Example](https://github.com/kubernetes-sigs/descheduler/pull/709)
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
4. Cut release branch from `master`, eg `release-1.24`
5. Publish release using Github's release process from the git tag you created
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
**Patch release**
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
2. Create the patch tag on the release branch, eg `v0.24.1` on `release-1.24`
3. Merge Helm chart version update to release branch
4. Perform the image promotion process for the patch version
5. Publish release using Github's release process from the git tag you created
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
### Flowchart
![Flowchart for major and patch releases](release-process.png)
### Image promotion process
Every merge to any branch triggers an [image build and push](https://github.com/kubernetes/test-infra/blob/c36b8e5/config/jobs/image-pushing/k8s-staging-descheduler.yaml) to a `gcr.io` repository.
These automated image builds are snapshots of the code in place at the time of every PR merge and
tagged with the latest git SHA at the time of the build. To create a final release image, the desired
auto-built image SHA is added to a [file upstream](https://github.com/kubernetes/k8s.io/blob/e9e971c/k8s.gcr.io/images/k8s-staging-descheduler/images.yaml) which
copies that image to a public registry.
Automatic builds can be monitored and re-triggered with the [`post-descheduler-push-images` job](https://prow.k8s.io/?job=post-descheduler-push-images) on prow.k8s.io.
Note that images can also be manually built and pushed using `VERSION=$VERSION make push-all` by [users with access](https://github.com/kubernetes/k8s.io/blob/fbee8f67b70304241e613a672c625ad972998ad7/groups/sig-scheduling/groups.yaml#L33-L43).
## Helm Chart
We currently use the [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) to automatically
publish [Helm chart releases](https://github.com/kubernetes-sigs/descheduler/blob/022e07c/.github/workflows/release.yaml).
This action is triggered when it detects any changes to [`Chart.yaml`](https://github.com/kubernetes-sigs/descheduler/blob/022e07c27853fade6d1304adc0a6ebe02642386c/charts/descheduler/Chart.yaml) on
a `release-*` branch.
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo.
The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version descheduler-helm-chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.
1. Merge all helm chart changes into the master branch before the release is tagged/cut
1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version (see the `Chart.yaml` sketch after this list).
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
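For illustration, a sketch of the relevant `Chart.yaml` fields (the version numbers are placeholders):
```yaml
# charts/descheduler/Chart.yaml (excerpt, illustrative values)
apiVersion: v2
name: descheduler
version: 0.23.2      # chart version; must be incremented for any chart change
appVersion: "0.23.1" # descheduler release this chart deploys, no "v" prefix
```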
## Notes
The Helm releaser-action compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail. This is why it's necessary to tag, eg, `v0.24.0` *before* making the changes to the
Helm chart version, so that there is a new diff for the action to find. (Tagging *after* making the Helm chart changes would
also work, but then the code that gets built into the promoted image will be tagged as `descheduler-helm-chart-xxx` rather than `v0.xx.0`).
### Notes
It's important to create the tag on the master branch after creating the release-* branch so that the [Helm releaser-action](#helm-chart) can work.
It compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail (creating a new release branch usually involves no code changes). For this same reason, you should
also tag patch releases (on the release-* branch) *after* pushing changes (if those changes involve bumping the Helm chart version).
See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.
@@ -102,3 +56,19 @@ Pull image from the staging registry.
```
docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```
## Helm Chart
Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. Example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action)
is setup to build and push the helm charts to the `gh-pages` branch when changes are pushed to a `release-*` branch.
The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version descheduler-helm-chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.
1. Merge all helm chart changes into the master branch before the release is tagged/cut
1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch


@@ -2,41 +2,67 @@
Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
Descheduler Version | Container Image | Architectures |
------------------- |-------------------------------------------------|-------------------------|
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
v0.28.1 | registry.k8s.io/descheduler/descheduler:v0.28.1 | AMD64<br>ARM64<br>ARMv7 |
v0.28.0 | registry.k8s.io/descheduler/descheduler:v0.28.0 | AMD64<br>ARM64<br>ARMv7 |
v0.27.1 | registry.k8s.io/descheduler/descheduler:v0.27.1 | AMD64<br>ARM64<br>ARMv7 |
v0.27.0 | registry.k8s.io/descheduler/descheduler:v0.27.0 | AMD64<br>ARM64<br>ARMv7 |
v0.26.1 | registry.k8s.io/descheduler/descheduler:v0.26.1 | AMD64<br>ARM64<br>ARMv7 |
v0.26.0 | registry.k8s.io/descheduler/descheduler:v0.26.0 | AMD64<br>ARM64<br>ARMv7 |
v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |
Descheduler Version | Container Image | Architectures |
------------------- |--------------------------------------------|-------------------------|
v0.23.0 | k8s.gcr.io/descheduler/descheduler:v0.23.0 | AMD64<br>ARM64<br>ARMv7 |
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |
v0.18.0 | k8s.gcr.io/descheduler/descheduler:v0.18.0 | AMD64 |
v0.10.0 | k8s.gcr.io/descheduler/descheduler:v0.10.0 | AMD64 |
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore,
starting with descheduler release v0.20.0, use the process below to download the official descheduler
image into a kind cluster.
```
kind create cluster
docker pull registry.k8s.io/descheduler/descheduler:v0.20.0
kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
docker pull k8s.gcr.io/descheduler/descheduler:v0.20.0
kind load docker-image k8s.gcr.io/descheduler/descheduler:v0.20.0
```
## Policy Configuration Examples
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
## CLI Options
The descheduler has many CLI options that can be used to override its default behavior. Please check the [CLI Options](./cli/descheduler.md) documentation for details.
The descheduler has many CLI options that can be used to override its default behavior.
```
descheduler --help
The descheduler evicts pods which may be bound to less desired nodes
Usage:
descheduler [flags]
descheduler [command]
Available Commands:
help Help about any command
version Version of descheduler
Flags:
--add-dir-header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
--dry-run execute descheduler in dry run mode.
--evict-local-storage-pods DEPRECATED: enables evicting pods using local storage by descheduler
-h, --help help for descheduler
--kubeconfig string File with kube configuration.
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log-dir string If non-empty, write log files in this directory
--log-file string If non-empty, use this log file
--log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
--logtostderr log to standard error instead of files (default true)
--max-pods-to-evict-per-node int DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
--node-selector string DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
--policy-config-file string File with descheduler policy configuration.
--skip-headers If true, avoid header prefixes in the log messages
--skip-log-headers If true, avoid headers when opening log files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
Use "descheduler [command] --help" for more information about a command.
```
## Production Use Cases
This section contains descriptions of real world production use cases.
@@ -58,23 +84,18 @@ descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.y
This policy configuration file ensures that pods created more than 7 days ago are evicted.
```
---
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "PodLifeTime"
args:
maxPodLifeTimeSeconds: 604800
plugins:
deschedule:
enabled:
- "PodLifeTime"
strategies:
"PodLifeTime":
enabled: true
params:
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```
### Balance Cluster By Node Memory Utilization
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
or `number of pods`.
#### Balance high utilization nodes
@@ -82,58 +103,41 @@ Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memo
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
```
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "LowNodeUtilization"
args:
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
targetThresholds:
"memory": 70
plugins:
balance:
enabled:
- "LowNodeUtilization"
```
#### Balance low utilization nodes
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
from nodes with memory utilization lower than 20%. This should use the `NodeResourcesFit` plugin with the `MostAllocated` scoring strategy; see these [docs](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins).
from nodes with memory utilization lower than 20%. This should be used along with scheduler strategy `MostRequestedPriority`.
The evicted pods will be compacted into minimal set of nodes.
```
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "HighNodeUtilization"
args:
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
plugins:
balance:
enabled:
- "HighNodeUtilization"
```
### Autoheal Node Problems
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server.
There is a node controller feature called TaintNodeByCondition that takes some conditions and turns them into taints. Currently, this only works for the default node conditions: PIDPressure, MemoryPressure, DiskPressure, Ready, and some cloud provider specific conditions.
The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
Nodes which have problems. Node Problem Detector can detect specific Node problems and taint any Nodes which have those
problems. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
---
**NOTE**
Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.
---


@@ -1,18 +1,14 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemoveFailedPods"
args:
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "OutOfcpu"
- "CreateContainerConfigError"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
plugins:
deschedule:
enabled:
- "RemoveFailedPods"
minPodLifetimeSeconds: 3600 # 1 hour


@@ -1,13 +1,9 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "HighNodeUtilization"
args:
strategies:
"HighNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
plugins:
balance:
enabled:
- "HighNodeUtilization"


@@ -1,15 +1,11 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "LowNodeUtilization"
args:
strategies:
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"memory": 20
targetThresholds:
"memory": 70
plugins:
balance:
enabled:
- "LowNodeUtilization"


@@ -1,13 +1,8 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsViolatingNodeAffinity"
args:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"
plugins:
deschedule:
enabled:
- "RemovePodsViolatingNodeAffinity"
strategies:
"RemovePodsViolatingNodeAffinity":
enabled: true
params:
nodeAffinityType:
- "requiredDuringSchedulingIgnoredDuringExecution"


@@ -1,15 +1,8 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "PodLifeTime"
args:
strategies:
"PodLifeTime":
enabled: true
params:
podLifeTime:
maxPodLifeTimeSeconds: 604800 # 7 days
states:
- "Pending"
- "PodInitializing"
plugins:
deschedule:
enabled:
- "PodLifeTime"


@@ -1,36 +1,29 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemoveDuplicates"
- name: "RemovePodsViolatingInterPodAntiAffinity"
- name: "LowNodeUtilization"
args:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
- name: "RemovePodsHavingTooManyRestarts"
args:
podRestartThreshold: 100
includingInitContainers: true
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
deschedule:
enabled:
- "RemovePodsViolatingInterPodAntiAffinity"
- "RemovePodsHavingTooManyRestarts"
balance:
enabled:
- "RemoveDuplicates"
- "LowNodeUtilization"
- "RemovePodsViolatingTopologySpreadConstraint"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
"RemovePodsHavingTooManyRestarts":
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true


@@ -1,15 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsHavingTooManyRestarts"
args:
podRestartThreshold: 100
includingInitContainers: true
states:
- CrashLoopBackOff
plugins:
deschedule:
enabled:
- "RemovePodsHavingTooManyRestarts"


@@ -1,14 +1,7 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "RemovePodsViolatingTopologySpreadConstraint"
args:
constraints:
- DoNotSchedule
- ScheduleAnyway
plugins:
balance:
enabled:
- "RemovePodsViolatingTopologySpreadConstraint"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints

go.mod

@@ -1,125 +1,117 @@
module sigs.k8s.io/descheduler
go 1.21
go 1.17
require (
github.com/client9/misspell v0.3.4
github.com/google/go-cmp v0.6.0
github.com/spf13/cobra v1.8.0
github.com/spf13/cobra v1.2.1
github.com/spf13/pflag v1.0.5
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
google.golang.org/grpc v1.60.1
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
k8s.io/apiserver v0.29.0
k8s.io/client-go v0.29.0
k8s.io/code-generator v0.29.0
k8s.io/component-base v0.29.0
k8s.io/component-helpers v0.29.0
k8s.io/klog/v2 v2.110.1
k8s.io/utils v0.0.0-20231127182322-b307cd553661
sigs.k8s.io/mdtoc v1.1.0
k8s.io/api v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/apiserver v0.23.0
k8s.io/client-go v0.23.0
k8s.io/code-generator v0.23.0
k8s.io/component-base v0.23.0
k8s.io/component-helpers v0.23.0
k8s.io/klog/v2 v2.30.0
k8s.io/kubectl v0.20.5
sigs.k8s.io/mdtoc v1.0.1
)
require (
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
github.com/google/cel-go v0.17.7 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
go.etcd.io/etcd/client/v3 v3.5.10 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/v3 v3.5.0 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.16.1 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
google.golang.org/protobuf v1.31.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
k8s.io/kms v0.29.0 // indirect
k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace (
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
// CVE-2023-48795: https://github.com/kubernetes-sigs/descheduler/security/code-scanning/17
// TODO: remove this replace once the upstream dependency is updated to v0.29.1
k8s.io/api => k8s.io/api v0.0.0-20231220172311-84c476802242
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20231220171733-60eaa653342b
k8s.io/client-go => k8s.io/client-go v0.0.0-20231220173006-5a0a4247921d
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)
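The replace block above pins the k8s.io staging modules to post-CVE commits. A quick sketch with standard Go tooling, run from the repo root, to confirm what those directives resolve to:

# Show the effective (replaced) module versions and verify checksums.
go list -m k8s.io/api k8s.io/apimachinery k8s.io/client-go
go mod verify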

go.sum

File diff suppressed because it is too large.


@@ -1,257 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
# meta.) Assumes you care about pulls from remote "upstream" and
# checks them out to a branch named:
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
set -o errexit
set -o nounset
set -o pipefail
REPO_ROOT="$(git rev-parse --show-toplevel)"
declare -r REPO_ROOT
cd "${REPO_ROOT}"
STARTINGBRANCH=$(git symbolic-ref --short HEAD)
declare -r STARTINGBRANCH
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
DRY_RUN=${DRY_RUN:-""}
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
FORK_REMOTE=${FORK_REMOTE:-origin}
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}
if [[ -z ${GITHUB_USER:-} ]]; then
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
exit 1
fi
if ! command -v gh > /dev/null; then
echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
exit 1
fi
if [[ "$#" -lt 2 ]]; then
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
echo
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
echo " Examples:"
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
echo
echo " Set the DRY_RUN environment var to skip git push and creating PR."
echo " This is useful for creating patches to a release branch without making a PR."
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
echo
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
echo " This is useful when picking commits containing changes to API documentation."
echo
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
echo " to override the default remote names to what you have locally."
echo
echo " For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
exit 2
fi
# Checks if you are logged in. Will error/bail if you are not.
gh auth status
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
echo "!!! Dirty tree. Clean up and try again."
exit 1
fi
if [[ -e "${REBASEMAGIC}" ]]; then
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
exit 1
fi
declare -r BRANCH="$1"
shift 1
declare -r PULLS=( "$@" )
function join { local IFS="$1"; shift; echo "$*"; }
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
declare -r PULLDASH
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
declare -r PULLSUBJ
echo "+++ Updating remotes..."
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
exit 1
fi
NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
declare -r NEWBRANCHREQ
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
declare -r NEWBRANCH
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
declare -r NEWBRANCHUNIQ
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
cleanbranch=""
gitamcleanup=false
function return_to_kansas {
if [[ "${gitamcleanup}" == "true" ]]; then
echo
echo "+++ Aborting in-progress git am."
git am --abort >/dev/null 2>&1 || true
fi
# return to the starting branch and delete the PR text file
if [[ -z "${DRY_RUN}" ]]; then
echo
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
if [[ -n "${cleanbranch}" ]]; then
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
fi
fi
}
trap return_to_kansas EXIT
SUBJECTS=()
function make-a-pr() {
local rel
rel="$(basename "${BRANCH}")"
echo
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
local numandtitle
numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
prtext=$(cat <<EOF
Cherry pick of ${PULLSUBJ} on ${rel}.
${numandtitle}
For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.
\`\`\`release-note
\`\`\`
EOF
)
gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
}
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
cleanbranch="${NEWBRANCHUNIQ}"
gitamcleanup=true
for pull in "${PULLS[@]}"; do
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
echo
echo "+++ About to attempt cherry pick of PR. To reattempt:"
echo " $ git am -3 /tmp/${pull}.patch"
echo
git am -3 "/tmp/${pull}.patch" || {
conflicts=false
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|| [[ -e "${REBASEMAGIC}" ]]; do
conflicts=true # <-- We should have detected conflicts once
echo
echo "+++ Conflicts detected:"
echo
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
echo
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
echo
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
echo "Aborting." >&2
exit 1
fi
done
if [[ "${conflicts}" != "true" ]]; then
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
exit 1
fi
}
# set the subject
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
SUBJECTS+=("#${pull}: ${subject}")
# remove the patch file from /tmp
rm -f "/tmp/${pull}.patch"
done
gitamcleanup=false
# Re-generate docs (if needed)
if [[ -n "${REGENERATE_DOCS}" ]]; then
echo
echo "Regenerating docs..."
if ! hack/generate-docs.sh; then
echo
echo "hack/generate-docs.sh FAILED to complete."
exit 1
fi
fi
if [[ -n "${DRY_RUN}" ]]; then
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
echo "To return to the branch you were in when you invoked this script:"
echo
echo " git checkout ${STARTINGBRANCH}"
echo
echo "To delete this branch:"
echo
echo " git branch -D ${NEWBRANCHUNIQ}"
exit 0
fi
if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
echo "This isn't normal. Leaving you with push instructions:"
echo
echo "+++ First manually push the branch this script created:"
echo
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
echo
make-a-pr
cleanbranch=""
exit 0
fi
echo
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
echo
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
echo
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
echo "Aborting." >&2
exit 1
fi
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
make-a-pr
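A usage sketch drawn from the script's own help text, assuming it lives at its usual path hack/cherry_pick_pull.sh:

# Cherry-pick PR 12345 onto upstream/release-3.14 without pushing or opening
# a PR; DRY_RUN leaves you on a branch containing the picked commits.
export GITHUB_USER=<your-github-user>
DRY_RUN=1 ./hack/cherry_pick_pull.sh upstream/release-3.14 12345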


@@ -1,20 +0,0 @@
package main
import (
"log"
"os"
"github.com/spf13/cobra/doc"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
var docGenPath = "docs/cli"
func main() {
cmd := app.NewDeschedulerCommand(os.Stdout)
cmd.AddCommand(app.NewVersionCommand())
cmd.DisableAutoGenTag = true // Disable this so that the diff won't track it
if err := doc.GenMarkdownTree(cmd, docGenPath); err != nil {
log.Fatal(err)
}
}
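Both invocations below are lifted from the hack scripts later in this compare: the first regenerates docs/cli in place, the second redirects output to a scratch directory by overriding the package-level docGenPath variable:

go run ./hack/doc-gen
go run -ldflags "-X main.docGenPath=$(mktemp -d)" ./hack/doc-gen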


@@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Utility function built on the 'find' command. The pipeline is as follows:
# 1. find all the go files; (exclude specific path: vendor etc)
# 2. find all the files containing specific tags in contents;
# 3. extract related dirs;
# 4. remove duplicated paths;
# 5. merge all dirs in array with delimiter ,;
#
# Example:
# find_dirs_containing_comment_tags("+k8s:")
# Return:
# sigs.k8s.io/descheduler/a,sigs.k8s.io/descheduler/b,sigs.k8s.io/descheduler/c
function find_dirs_containing_comment_tags() {
array=()
while IFS='' read -r line; do array+=("$line"); done < <( \
find . -type f -name \*.go -not -path "./vendor/*" -not -path "./_tmp/*" -print0 \
| xargs -0 grep --color=never -l "$@" \
| xargs -n1 dirname \
| LC_ALL=C sort -u \
)
IFS=",";
printf '%s' "${array[*]}";
}
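A usage sketch per the function's own docstring; the example output is illustrative:

# Print the comma-separated list of packages carrying a given codegen tag.
source hack/lib/generator-help.sh
find_dirs_containing_comment_tags "+k8s:deepcopy-gen="
# -> sigs.k8s.io/descheduler/pkg/api,sigs.k8s.io/descheduler/pkg/apis/componentconfig,...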


@@ -1,10 +1,9 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
${OS_OUTPUT_BINPATH}/conversion-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.conversion


@@ -1,11 +1,10 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.deepcopy


@@ -1,11 +1,10 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi


@@ -1,6 +1,6 @@
#!/bin/bash
# Copyright 2023 The Kubernetes Authors.
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,6 +18,8 @@ set -o errexit
set -o nounset
set -o pipefail
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
go run ${SCRIPT_DIR}/doc-gen
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md


@@ -1 +0,0 @@
${CONTAINER_ENGINE:-docker} run -it --rm --network host --workdir=/data --volume ~/.kube/config:/root/.kube/config:ro --volume $(pwd):/data quay.io/helmpack/chart-testing:v3.7.0 /bin/bash -c "git config --global --add safe.directory /data; ct install --config=.github/ci/ct.yaml --helm-extra-set-args=\"--set=kind=Deployment\""


@@ -16,7 +16,12 @@ git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_des
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
./hack/update-generated-conversions.sh
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
${OS_OUTPUT_BINPATH}/conversion-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "./pkg/apis/componentconfig/v1alpha1,./pkg/api/v1alpha1" \
--output-file-base zz_generated.conversion
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1


@@ -16,7 +16,12 @@ git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_des
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
./hack/update-generated-deep-copies.sh
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
${OS_OUTPUT_BINPATH}/deepcopy-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "./pkg/apis/componentconfig,./pkg/apis/componentconfig/v1alpha1,./pkg/api,./pkg/api/v1alpha1" \
--output-file-base zz_generated.deepcopy
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1


@@ -15,7 +15,13 @@ git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_des
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
./hack/update-generated-defaulters.sh
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.18|go1.19|go1.20|go1.21') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi


@@ -1,6 +1,6 @@
#!/bin/bash
# Copyright 2023 The Kubernetes Authors.
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,18 +18,12 @@ set -o errexit
set -o nounset
set -o pipefail
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
temp_dir=$(mktemp -d)
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
go run -ldflags "-X main.docGenPath=${temp_dir}" ${SCRIPT_DIR}/doc-gen
if ! _out="$(diff -Naupr ${SCRIPT_DIR}/../docs/cli "${temp_dir}")"; then
echo "Generated output differs:" >&2
echo "${_out}" >&2
echo "Generated conversions verify failed. Please run ./hack/update-docs.sh"
rm -rf ${temp_dir}
exit 1
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
then
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
exit 1
fi
rm -rf ${temp_dir}


@@ -6,29 +6,23 @@ metadata:
namespace: kube-system
data:
policy.yaml: |
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
- name: ProfileName
pluginConfig:
- name: "DefaultEvictor"
- name: "RemovePodsViolatingInterPodAntiAffinity"
- name: "RemoveDuplicates"
- name: "LowNodeUtilization"
args:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50
plugins:
balance:
enabled:
- "LowNodeUtilization"
- "RemoveDuplicates"
deschedule:
enabled:
- "RemovePodsViolatingInterPodAntiAffinity"
strategies:
"RemoveDuplicates":
enabled: true
"RemovePodsViolatingInterPodAntiAffinity":
enabled: true
"LowNodeUtilization":
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
"cpu" : 20
"memory": 20
"pods": 20
targetThresholds:
"cpu" : 50
"memory": 50
"pods": 50

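A sketch for rolling the policy out and reading it back; the manifest path and ConfigMap name are assumptions based on the descheduler's usual layout:

kubectl apply -f kubernetes/base/configmap.yaml
kubectl -n kube-system get configmap descheduler-policy-configmap \
  -o jsonpath='{.data.policy\.yaml}'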

@@ -4,7 +4,7 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: descheduler-cluster-role
rules:
- apiGroups: ["events.k8s.io"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
@@ -22,13 +22,6 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["descheduler"]
verbs: ["get", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
@@ -48,3 +41,4 @@ subjects:
- name: descheduler-sa
kind: ServiceAccount
namespace: kube-system
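The effect of the events rule change can be spot-checked with kubectl's access review helper; the service account and namespace are taken from the manifest above:

# master grants create/update on events.k8s.io; the release branch uses the core group.
kubectl auth can-i create events.events.k8s.io \
  --as=system:serviceaccount:kube-system:descheduler-sa
kubectl auth can-i create events \
  --as=system:serviceaccount:kube-system:descheduler-sa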


@@ -1,5 +1,5 @@
---
apiVersion: batch/v1
apiVersion: batch/v1 # for k8s version < 1.21.0, use batch/v1beta1
kind: CronJob
metadata:
name: descheduler-cronjob
@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.29.0
image: k8s.gcr.io/descheduler/descheduler:v0.23.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume


@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.29.0
image: k8s.gcr.io/descheduler/descheduler:v0.23.0
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"


@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: registry.k8s.io/descheduler/descheduler:v0.29.0
image: k8s.gcr.io/descheduler/descheduler:v0.23.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
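The three manifests above only move the image reference between registries. As a sketch, the same bump can be applied to a live workload, assuming kubectl set image can address the pod template nested in the CronJob:

kubectl -n kube-system set image cronjob/descheduler-cronjob \
  descheduler=registry.k8s.io/descheduler/descheduler:v0.29.0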


@@ -43,34 +43,14 @@ var (
Subsystem: DeschedulerSubsystem,
Name: "build_info",
Help: "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "AppVersion": version.Get().Major + "." + version.Get().Minor, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
ConstLabels: map[string]string{"GoVersion": version.Get().GoVersion, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
StabilityLevel: metrics.ALPHA,
},
)
DeschedulerLoopDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_loop_duration_seconds",
Help: "Time taken to complete a full descheduling cycle",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
}, []string{})
DeschedulerStrategyDuration = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Subsystem: DeschedulerSubsystem,
Name: "descheduler_strategy_duration_seconds",
Help: "Time taken to complete each strategy of the descheduling operation",
StabilityLevel: metrics.ALPHA,
Buckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
}, []string{"strategy", "profile"})
metricsList = []metrics.Registerable{
PodsEvicted,
buildInfo,
DeschedulerLoopDuration,
DeschedulerStrategyDuration,
}
)
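A sketch for spot-checking the two histograms that exist on master but not on this release branch; the metrics port, workload name, and exact metric prefix are assumptions:

kubectl -n kube-system port-forward deploy/descheduler 10258:10258 &
curl -sk https://localhost:10258/metrics | grep loop_duration_seconds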


@@ -1,17 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api


@@ -1,23 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import "sort"
func SortDeschedulerProfileByName(profiles []DeschedulerProfile) []DeschedulerProfile {
sort.Slice(profiles, func(i, j int) bool {
return profiles[i].Name < profiles[j].Name
})
return profiles
}


@@ -19,7 +19,6 @@ package api
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -27,12 +26,24 @@ import (
type DeschedulerPolicy struct {
metav1.TypeMeta
// Profiles
Profiles []DeschedulerProfile
// Strategies
Strategies StrategyList
// NodeSelector for a set of nodes to operate over
NodeSelector *string
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
EvictSystemCriticalPods *bool
// IgnorePVCPods prevents pods with PVCs from being evicted.
IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint
@@ -40,44 +51,71 @@ type DeschedulerPolicy struct {
MaxNoOfPodsToEvictPerNamespace *uint
}
type StrategyName string
type StrategyList map[StrategyName]DeschedulerStrategy
type DeschedulerStrategy struct {
// Enabled or disabled
Enabled bool
// Weight
Weight int
// Strategy parameters
Params *StrategyParameters
}
// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable
type Namespaces struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
Include []string
Exclude []string
}
type (
Percentage float64
ResourceThresholds map[v1.ResourceName]Percentage
)
type PriorityThreshold struct {
Value *int32 `json:"value"`
Name string `json:"name"`
// Besides Namespaces only one of its members may be specified
// TODO(jchaloup): move Namespaces ThresholdPriority and ThresholdPriorityClassName to individual strategies
// once the policy version is bumped to v1alpha2
type StrategyParameters struct {
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds
NodeAffinityType []string
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
PodLifeTime *PodLifeTime
RemoveDuplicates *RemoveDuplicates
FailedPods *FailedPods
IncludeSoftConstraints bool
Namespaces *Namespaces
ThresholdPriority *int32
ThresholdPriorityClassName string
LabelSelector *metav1.LabelSelector
NodeFit bool
}
type DeschedulerProfile struct {
Name string
PluginConfigs []PluginConfig
Plugins Plugins
type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
Thresholds ResourceThresholds
TargetThresholds ResourceThresholds
NumberOfNodes int
}
type PluginConfig struct {
Name string
Args runtime.Object
type PodsHavingTooManyRestarts struct {
PodRestartThreshold int32
IncludingInitContainers bool
}
type Plugins struct {
PreSort PluginSet
Sort PluginSet
Deschedule PluginSet
Balance PluginSet
Filter PluginSet
PreEvictionFilter PluginSet
type RemoveDuplicates struct {
ExcludeOwnerKinds []string
}
type PluginSet struct {
Enabled []string
Disabled []string
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint
PodStatusPhases []string
}
type FailedPods struct {
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}
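These parameter structs surface as camelCase keys in v1alpha1 policy files. An illustrative sketch for the PodLifeTime strategy, with key names as documented for that API version:

cat > /tmp/policy-v1alpha1.yaml <<'EOF'
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
        podStatusPhases:
        - "Pending"
EOF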


@@ -1,271 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
)
var (
// Scheme holds the internal and v1alpha1 API types registered for
// defaulting and converting typed PluginConfig Args.
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)
// evictorImpl implements the Evictor interface so plugins
// can evict a pod without importing a specific pod evictor
type evictorImpl struct {
podEvictor *evictions.PodEvictor
evictorFilter frameworktypes.EvictorPlugin
}
var _ frameworktypes.Evictor = &evictorImpl{}
// Filter checks if a pod can be evicted
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
return ei.evictorFilter.Filter(pod)
}
// PreEvictionFilter checks if pod can be evicted right before eviction
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
return ei.evictorFilter.PreEvictionFilter(pod)
}
// Evict evicts a pod (no pre-check performed)
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
return ei.podEvictor.EvictPod(ctx, pod, opts)
}
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
return ei.podEvictor.NodeLimitExceeded(node)
}
// handleImpl implements the framework handle which gets passed to plugins
type handleImpl struct {
clientSet clientset.Interface
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictor *evictorImpl
}
var _ frameworktypes.Handle = &handleImpl{}
// ClientSet retrieves kube client set
func (hi *handleImpl) ClientSet() clientset.Interface {
return hi.clientSet
}
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
return hi.getPodsAssignedToNodeFunc
}
// SharedInformerFactory retrieves shared informer factory
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
return hi.sharedInformerFactory
}
// Evictor retrieves evictor so plugins can filter and evict pods
func (hi *handleImpl) Evictor() frameworktypes.Evictor {
return hi.evictor
}
func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
err := V1alpha1ToInternal(in, pluginregistry.PluginRegistry, out, s)
if err != nil {
return err
}
return nil
}
func V1alpha1ToInternal(
deschedulerPolicy *DeschedulerPolicy,
registry pluginregistry.Registry,
out *api.DeschedulerPolicy,
s conversion.Scope,
) error {
var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
if evictSystemCriticalPods {
klog.V(1).Info("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
}
}
ignorePvcPods := false
if deschedulerPolicy.IgnorePVCPods != nil {
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
var profiles []api.DeschedulerProfile
// Build profiles
for name, strategy := range deschedulerPolicy.Strategies {
if _, ok := pluginregistry.PluginRegistry[string(name)]; ok {
if strategy.Enabled {
params := strategy.Params
if params == nil {
params = &StrategyParameters{}
}
nodeFit := false
if name != "PodLifeTime" {
nodeFit = params.NodeFit
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
klog.ErrorS(fmt.Errorf("priority threshold misconfigured"), "only one of priorityThreshold fields can be set", "pluginName", name)
return fmt.Errorf("priority threshold misconfigured for plugin %v", name)
}
var priorityThreshold *api.PriorityThreshold
if strategy.Params != nil {
priorityThreshold = &api.PriorityThreshold{
Value: strategy.Params.ThresholdPriority,
Name: strategy.Params.ThresholdPriorityClassName,
}
}
var pluginConfig *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[string(name)]; exists {
pluginConfig, err = pcFnc(params)
if err != nil {
klog.ErrorS(err, "skipping strategy", "strategy", name)
return fmt.Errorf("failed to get plugin config for strategy %v: %v", name, err)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
return fmt.Errorf("unknown strategy name: %v", name)
}
profile := api.DeschedulerProfile{
Name: fmt.Sprintf("strategy-%v-profile", name),
PluginConfigs: []api.PluginConfig{
{
Name: defaultevictor.PluginName,
Args: &defaultevictor.DefaultEvictorArgs{
EvictLocalStoragePods: evictLocalStoragePods,
EvictSystemCriticalPods: evictSystemCriticalPods,
IgnorePvcPods: ignorePvcPods,
EvictFailedBarePods: evictBarePods,
NodeFit: nodeFit,
PriorityThreshold: priorityThreshold,
},
},
*pluginConfig,
},
Plugins: api.Plugins{
Filter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
PreEvictionFilter: api.PluginSet{
Enabled: []string{defaultevictor.PluginName},
},
},
}
pluginArgs := registry[string(name)].PluginArgInstance
pluginInstance, err := registry[string(name)].PluginBuilder(pluginArgs, &handleImpl{})
if err != nil {
klog.ErrorS(fmt.Errorf("could not build plugin"), "plugin build error", "plugin", name)
return fmt.Errorf("could not build plugin: %v", name)
}
// pluginInstance can implement either extension point, or both
profilePlugins := profile.Plugins
profile.Plugins = enableProfilePluginsByType(profilePlugins, pluginInstance, pluginConfig)
profiles = append(profiles, profile)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
return fmt.Errorf("unknown strategy name: %v", name)
}
}
out.Profiles = profiles
out.NodeSelector = deschedulerPolicy.NodeSelector
out.MaxNoOfPodsToEvictPerNamespace = deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace
out.MaxNoOfPodsToEvictPerNode = deschedulerPolicy.MaxNoOfPodsToEvictPerNode
return nil
}
func enableProfilePluginsByType(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
profilePlugins = checkBalance(profilePlugins, pluginInstance, pluginConfig)
profilePlugins = checkDeschedule(profilePlugins, pluginInstance, pluginConfig)
return profilePlugins
}
func checkBalance(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(frameworktypes.BalancePlugin)
if ok {
klog.V(3).Infof("converting Balance plugin: %s", pluginInstance.Name())
profilePlugins.Balance.Enabled = []string{pluginConfig.Name}
}
return profilePlugins
}
func checkDeschedule(profilePlugins api.Plugins, pluginInstance frameworktypes.Plugin, pluginConfig *api.PluginConfig) api.Plugins {
_, ok := pluginInstance.(frameworktypes.DeschedulePlugin)
if ok {
klog.V(3).Infof("converting Deschedule plugin: %s", pluginInstance.Name())
profilePlugins.Deschedule.Enabled = []string{pluginConfig.Name}
}
return profilePlugins
}
// Register Conversions
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
}); err != nil {
return err
}
return nil
}
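Net effect of the converter: every enabled v1alpha1 strategy becomes its own single-strategy profile named strategy-<name>-profile, with DefaultEvictor wired into both filter extension points. An illustrative rendering for RemoveDuplicates, which registers as a Balance plugin:

cat <<'EOF'
profiles:
- name: strategy-RemoveDuplicates-profile
  pluginConfig:
  - name: DefaultEvictor
  - name: RemoveDuplicates
  plugins:
    filter:            { enabled: [DefaultEvictor] }
    preEvictionFilter: { enabled: [DefaultEvictor] }
    balance:           { enabled: [RemoveDuplicates] }
EOF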


@@ -15,6 +15,7 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
// +k8s:defaulter-gen=TypeMeta
// Package v1alpha1 is the v1alpha1 version of the descheduler API


@@ -28,10 +28,8 @@ var (
)
// GroupName is the group name used in this package
const (
GroupName = "descheduler"
GroupVersion = "v1alpha1"
)
const GroupName = "descheduler"
const GroupVersion = "v1alpha1"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}
@@ -50,7 +48,7 @@ func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs, RegisterConversions)
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {


@@ -1,256 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
// Once all strategies are migrated the arguments get read from the configuration file
// without any wiring. Keeping the wiring here so the descheduler can still use
// the v1alpha1 configuration during the strategy migration to plugins.
var StrategyParamsToPluginArgs = map[string]func(params *StrategyParameters) (*api.PluginConfig, error){
"RemovePodsViolatingNodeTaints": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
IncludePreferNoSchedule: params.IncludePreferNoSchedule,
ExcludedTaints: params.ExcludedTaints,
}
if err := removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodetaints.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodetaints.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatingnodetaints.PluginName,
Args: args,
}, nil
},
"RemoveFailedPods": func(params *StrategyParameters) (*api.PluginConfig, error) {
failedPodsParams := params.FailedPods
if failedPodsParams == nil {
failedPodsParams = &FailedPods{}
}
args := &removefailedpods.RemoveFailedPodsArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
IncludingInitContainers: failedPodsParams.IncludingInitContainers,
MinPodLifetimeSeconds: failedPodsParams.MinPodLifetimeSeconds,
ExcludeOwnerKinds: failedPodsParams.ExcludeOwnerKinds,
Reasons: failedPodsParams.Reasons,
}
if err := removefailedpods.ValidateRemoveFailedPodsArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removefailedpods.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removefailedpods.PluginName, err)
}
return &api.PluginConfig{
Name: removefailedpods.PluginName,
Args: args,
}, nil
},
"RemovePodsViolatingNodeAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
NodeAffinityType: params.NodeAffinityType,
}
if err := removepodsviolatingnodeaffinity.ValidateRemovePodsViolatingNodeAffinityArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingnodeaffinity.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingnodeaffinity.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatingnodeaffinity.PluginName,
Args: args,
}, nil
},
"RemovePodsViolatingInterPodAntiAffinity": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
}
if err := removepodsviolatinginterpodantiaffinity.ValidateRemovePodsViolatingInterPodAntiAffinityArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatinginterpodantiaffinity.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatinginterpodantiaffinity.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatinginterpodantiaffinity.PluginName,
Args: args,
}, nil
},
"RemovePodsHavingTooManyRestarts": func(params *StrategyParameters) (*api.PluginConfig, error) {
tooManyRestartsParams := params.PodsHavingTooManyRestarts
if tooManyRestartsParams == nil {
tooManyRestartsParams = &PodsHavingTooManyRestarts{}
}
args := &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
PodRestartThreshold: tooManyRestartsParams.PodRestartThreshold,
IncludingInitContainers: tooManyRestartsParams.IncludingInitContainers,
}
if err := removepodshavingtoomanyrestarts.ValidateRemovePodsHavingTooManyRestartsArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodshavingtoomanyrestarts.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodshavingtoomanyrestarts.PluginName, err)
}
return &api.PluginConfig{
Name: removepodshavingtoomanyrestarts.PluginName,
Args: args,
}, nil
},
"PodLifeTime": func(params *StrategyParameters) (*api.PluginConfig, error) {
podLifeTimeParams := params.PodLifeTime
if podLifeTimeParams == nil {
podLifeTimeParams = &PodLifeTime{}
}
var states []string
if podLifeTimeParams.PodStatusPhases != nil {
states = append(states, podLifeTimeParams.PodStatusPhases...)
}
if podLifeTimeParams.States != nil {
states = append(states, podLifeTimeParams.States...)
}
args := &podlifetime.PodLifeTimeArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
MaxPodLifeTimeSeconds: podLifeTimeParams.MaxPodLifeTimeSeconds,
States: states,
}
if err := podlifetime.ValidatePodLifeTimeArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", podlifetime.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", podlifetime.PluginName, err)
}
return &api.PluginConfig{
Name: podlifetime.PluginName,
Args: args,
}, nil
},
"RemoveDuplicates": func(params *StrategyParameters) (*api.PluginConfig, error) {
args := &removeduplicates.RemoveDuplicatesArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
}
if params.RemoveDuplicates != nil {
args.ExcludeOwnerKinds = params.RemoveDuplicates.ExcludeOwnerKinds
}
if err := removeduplicates.ValidateRemoveDuplicatesArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removeduplicates.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removeduplicates.PluginName, err)
}
return &api.PluginConfig{
Name: removeduplicates.PluginName,
Args: args,
}, nil
},
"RemovePodsViolatingTopologySpreadConstraint": func(params *StrategyParameters) (*api.PluginConfig, error) {
constraints := []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule}
if params.IncludeSoftConstraints {
constraints = append(constraints, v1.ScheduleAnyway)
}
args := &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Namespaces: v1alpha1NamespacesToInternal(params.Namespaces),
LabelSelector: params.LabelSelector,
Constraints: constraints,
TopologyBalanceNodeFit: utilpointer.Bool(true),
}
if err := removepodsviolatingtopologyspreadconstraint.ValidateRemovePodsViolatingTopologySpreadConstraintArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", removepodsviolatingtopologyspreadconstraint.PluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", removepodsviolatingtopologyspreadconstraint.PluginName, err)
}
return &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: args,
}, nil
},
"HighNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
if params.NodeResourceUtilizationThresholds == nil {
params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
}
args := &nodeutilization.HighNodeUtilizationArgs{
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
}
if err := nodeutilization.ValidateHighNodeUtilizationArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.HighNodeUtilizationPluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.HighNodeUtilizationPluginName, err)
}
return &api.PluginConfig{
Name: nodeutilization.HighNodeUtilizationPluginName,
Args: args,
}, nil
},
"LowNodeUtilization": func(params *StrategyParameters) (*api.PluginConfig, error) {
if params.NodeResourceUtilizationThresholds == nil {
params.NodeResourceUtilizationThresholds = &NodeResourceUtilizationThresholds{}
}
args := &nodeutilization.LowNodeUtilizationArgs{
EvictableNamespaces: v1alpha1NamespacesToInternal(params.Namespaces),
Thresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.Thresholds),
TargetThresholds: v1alpha1ThresholdToInternal(params.NodeResourceUtilizationThresholds.TargetThresholds),
UseDeviationThresholds: params.NodeResourceUtilizationThresholds.UseDeviationThresholds,
NumberOfNodes: params.NodeResourceUtilizationThresholds.NumberOfNodes,
}
if err := nodeutilization.ValidateLowNodeUtilizationArgs(args); err != nil {
klog.ErrorS(err, "unable to validate plugin arguments", "pluginName", nodeutilization.LowNodeUtilizationPluginName)
return nil, fmt.Errorf("strategy %q param validation failed: %v", nodeutilization.LowNodeUtilizationPluginName, err)
}
return &api.PluginConfig{
Name: nodeutilization.LowNodeUtilizationPluginName,
Args: args,
}, nil
},
}
func v1alpha1NamespacesToInternal(namespaces *Namespaces) *api.Namespaces {
internal := &api.Namespaces{}
if namespaces != nil {
if namespaces.Exclude != nil {
internal.Exclude = namespaces.Exclude
}
if namespaces.Include != nil {
internal.Include = namespaces.Include
}
} else {
internal = nil
}
return internal
}
func v1alpha1ThresholdToInternal(thresholds ResourceThresholds) api.ResourceThresholds {
internal := make(api.ResourceThresholds, len(thresholds))
for k, v := range thresholds {
internal[k] = api.Percentage(float64(v))
}
return internal
}


@@ -1,859 +0,0 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
utilpointer "k8s.io/utils/pointer"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
"sigs.k8s.io/descheduler/pkg/framework/plugins/podlifetime"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removefailedpods"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodshavingtoomanyrestarts"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatinginterpodantiaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodeaffinity"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingtopologyspreadconstraint"
)
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeTaints(t *testing.T) {
strategyName := "RemovePodsViolatingNodeTaints"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
ExcludedTaints: []string{
"dedicated=special-user",
"reserved",
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingnodetaints.PluginName,
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
ExcludedTaints: []string{"dedicated=special-user", "reserved"},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemoveFailedPods(t *testing.T) {
strategyName := "RemoveFailedPods"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
FailedPods: &FailedPods{
MinPodLifetimeSeconds: utilpointer.Uint(3600),
ExcludeOwnerKinds: []string{"Job"},
Reasons: []string{"NodeAffinity"},
IncludingInitContainers: true,
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removefailedpods.PluginName,
Args: &removefailedpods.RemoveFailedPodsArgs{
ExcludeOwnerKinds: []string{"Job"},
MinPodLifetimeSeconds: utilpointer.Uint(3600),
Reasons: []string{"NodeAffinity"},
IncludingInitContainers: true,
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsViolatingNodeAffinity(t *testing.T) {
strategyName := "RemovePodsViolatingNodeAffinity"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingnodeaffinity.PluginName,
Args: &removepodsviolatingnodeaffinity.RemovePodsViolatingNodeAffinityArgs{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params, not setting nodeaffinity type",
params: &StrategyParameters{},
err: fmt.Errorf("strategy \"%s\" param validation failed: nodeAffinityType needs to be set", strategyName),
result: nil,
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
NodeAffinityType: []string{"requiredDuringSchedulingIgnoredDuringExecution"},
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsViolatingInterPodAntiAffinity(t *testing.T) {
strategyName := "RemovePodsViolatingInterPodAntiAffinity"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatinginterpodantiaffinity.PluginName,
Args: &removepodsviolatinginterpodantiaffinity.RemovePodsViolatingInterPodAntiAffinityArgs{
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsHavingTooManyRestarts(t *testing.T) {
strategyName := "RemovePodsHavingTooManyRestarts"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
PodRestartThreshold: 100,
IncludingInitContainers: true,
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodshavingtoomanyrestarts.PluginName,
Args: &removepodshavingtoomanyrestarts.RemovePodsHavingTooManyRestartsArgs{
PodRestartThreshold: 100,
IncludingInitContainers: true,
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
{
description: "invalid params restart threshold",
params: &StrategyParameters{
PodsHavingTooManyRestarts: &PodsHavingTooManyRestarts{
PodRestartThreshold: 0,
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: invalid PodsHavingTooManyRestarts threshold", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsPodLifeTime(t *testing.T) {
strategyName := "PodLifeTime"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
States: []string{
"Pending",
"PodInitializing",
},
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: podlifetime.PluginName,
Args: &podlifetime.PodLifeTimeArgs{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
States: []string{
"Pending",
"PodInitializing",
},
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
},
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
{
description: "invalid params MaxPodLifeTimeSeconds not set",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: MaxPodLifeTimeSeconds not set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemoveDuplicates(t *testing.T) {
strategyName := "RemoveDuplicates"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
RemoveDuplicates: &RemoveDuplicates{
ExcludeOwnerKinds: []string{"ReplicaSet"},
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removeduplicates.PluginName,
Args: &removeduplicates.RemoveDuplicatesArgs{
ExcludeOwnerKinds: []string{"ReplicaSet"},
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
PodLifeTime: &PodLifeTime{
MaxPodLifeTimeSeconds: utilpointer.Uint(86400),
},
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsRemovePodsViolatingTopologySpreadConstraint(t *testing.T) {
strategyName := "RemovePodsViolatingTopologySpreadConstraint"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
IncludeSoftConstraints: true,
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule, v1.ScheduleAnyway},
TopologyBalanceNodeFit: utilpointer.Bool(true),
Namespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "params without soft constraints",
params: &StrategyParameters{
IncludeSoftConstraints: false,
},
err: nil,
result: &api.PluginConfig{
Name: removepodsviolatingtopologyspreadconstraint.PluginName,
Args: &removepodsviolatingtopologyspreadconstraint.RemovePodsViolatingTopologySpreadConstraintArgs{
Constraints: []v1.UnsatisfiableConstraintAction{v1.DoNotSchedule},
TopologyBalanceNodeFit: utilpointer.Bool(true),
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
Namespaces: &Namespaces{
Exclude: []string{"test1"},
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only one of Include/Exclude namespaces can be set", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsHighNodeUtilization(t *testing.T) {
strategyName := "HighNodeUtilization"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: nodeutilization.HighNodeUtilizationPluginName,
Args: &nodeutilization.HighNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
"cpu": api.Percentage(20),
"memory": api.Percentage(20),
"pods": api.Percentage(20),
},
NumberOfNodes: 3,
EvictableNamespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
},
Namespaces: &Namespaces{
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
result: nil,
},
{
description: "invalid params nil ResourceThresholds",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: no resource threshold is configured", strategyName),
result: nil,
},
{
description: "invalid params out of bounds threshold",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(150),
},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: cpu threshold not in [0, 100] range", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}
func TestStrategyParamsToPluginArgsLowNodeUtilization(t *testing.T) {
strategyName := "LowNodeUtilization"
type testCase struct {
description string
params *StrategyParameters
err error
result *api.PluginConfig
}
testCases := []testCase{
{
description: "wire in all valid parameters",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
TargetThresholds: ResourceThresholds{
"cpu": Percentage(50),
"memory": Percentage(50),
"pods": Percentage(50),
},
UseDeviationThresholds: true,
},
ThresholdPriority: utilpointer.Int32(100),
Namespaces: &Namespaces{
Exclude: []string{"test1"},
},
},
err: nil,
result: &api.PluginConfig{
Name: nodeutilization.LowNodeUtilizationPluginName,
Args: &nodeutilization.LowNodeUtilizationArgs{
Thresholds: api.ResourceThresholds{
"cpu": api.Percentage(20),
"memory": api.Percentage(20),
"pods": api.Percentage(20),
},
TargetThresholds: api.ResourceThresholds{
"cpu": api.Percentage(50),
"memory": api.Percentage(50),
"pods": api.Percentage(50),
},
UseDeviationThresholds: true,
NumberOfNodes: 3,
EvictableNamespaces: &api.Namespaces{
Exclude: []string{"test1"},
},
},
},
},
{
description: "invalid params namespaces",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(20),
"memory": Percentage(20),
"pods": Percentage(20),
},
TargetThresholds: ResourceThresholds{
"cpu": Percentage(50),
"memory": Percentage(50),
"pods": Percentage(50),
},
UseDeviationThresholds: true,
},
Namespaces: &Namespaces{
Include: []string{"test2"},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: only Exclude namespaces can be set, inclusion is not supported", strategyName),
result: nil,
},
{
description: "invalid params nil ResourceThresholds",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: no resource threshold is configured", strategyName),
result: nil,
},
{
description: "invalid params out of bounds threshold",
params: &StrategyParameters{
NodeResourceUtilizationThresholds: &NodeResourceUtilizationThresholds{
NumberOfNodes: 3,
Thresholds: ResourceThresholds{
"cpu": Percentage(150),
},
},
},
err: fmt.Errorf("strategy \"%s\" param validation failed: thresholds config is not valid: cpu threshold not in [0, 100] range", strategyName),
result: nil,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
var result *api.PluginConfig
var err error
if pcFnc, exists := StrategyParamsToPluginArgs[strategyName]; exists {
result, err = pcFnc(tc.params)
}
if err != nil {
if err.Error() != tc.err.Error() {
t.Errorf("unexpected error: %s", err.Error())
}
}
if err == nil {
// compare expected and actual plugin configs for deep equality
diff := cmp.Diff(tc.result, result)
if diff != "" {
t.Errorf("test '%s' failed. Results are not deep equal. mismatch (-want +got):\n%s", tc.description, diff)
}
}
})
}
}

View File

@@ -45,16 +45,14 @@ type DeschedulerPolicy struct {
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
-MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
+MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
-MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
+MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
-type (
-StrategyName string
-StrategyList map[StrategyName]DeschedulerStrategy
-)
+type StrategyName string
+type StrategyList map[StrategyName]DeschedulerStrategy
type DeschedulerStrategy struct {
// Enabled or disabled
@@ -88,20 +86,15 @@ type StrategyParameters struct {
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
NodeFit bool `json:"nodeFit"`
-IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
-ExcludedTaints []string `json:"excludedTaints,omitempty"`
}
-type (
-Percentage float64
-ResourceThresholds map[v1.ResourceName]Percentage
-)
+type Percentage float64
+type ResourceThresholds map[v1.ResourceName]Percentage
type NodeResourceUtilizationThresholds struct {
-UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
-Thresholds ResourceThresholds `json:"thresholds,omitempty"`
-TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
-NumberOfNodes int `json:"numberOfNodes,omitempty"`
+Thresholds ResourceThresholds `json:"thresholds,omitempty"`
+TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
+NumberOfNodes int `json:"numberOfNodes,omitempty"`
}
type PodsHavingTooManyRestarts struct {
@@ -115,10 +108,7 @@ type RemoveDuplicates struct {
type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
-States []string `json:"states,omitempty"`
-// Deprecated: Use States instead.
-PodStatusPhases []string `json:"podStatusPhases,omitempty"`
+PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
type FailedPods struct {

View File

@@ -0,0 +1,391 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*DeschedulerStrategy)(nil), (*api.DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(a.(*DeschedulerStrategy), b.(*api.DeschedulerStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.DeschedulerStrategy)(nil), (*DeschedulerStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(a.(*api.DeschedulerStrategy), b.(*DeschedulerStrategy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.Namespaces)(nil), (*Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Namespaces_To_v1alpha1_Namespaces(a.(*api.Namespaces), b.(*Namespaces), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*NodeResourceUtilizationThresholds)(nil), (*api.NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(a.(*NodeResourceUtilizationThresholds), b.(*api.NodeResourceUtilizationThresholds), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.NodeResourceUtilizationThresholds)(nil), (*NodeResourceUtilizationThresholds)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(a.(*api.NodeResourceUtilizationThresholds), b.(*NodeResourceUtilizationThresholds), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PodLifeTime)(nil), (*api.PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(a.(*PodLifeTime), b.(*api.PodLifeTime), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PodLifeTime)(nil), (*PodLifeTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(a.(*api.PodLifeTime), b.(*PodLifeTime), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PodsHavingTooManyRestarts)(nil), (*api.PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(a.(*PodsHavingTooManyRestarts), b.(*api.PodsHavingTooManyRestarts), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PodsHavingTooManyRestarts)(nil), (*PodsHavingTooManyRestarts)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(a.(*api.PodsHavingTooManyRestarts), b.(*PodsHavingTooManyRestarts), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RemoveDuplicates)(nil), (*api.RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(a.(*RemoveDuplicates), b.(*api.RemoveDuplicates), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.RemoveDuplicates)(nil), (*RemoveDuplicates)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(a.(*api.RemoveDuplicates), b.(*RemoveDuplicates), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*StrategyParameters)(nil), (*api.StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(a.(*StrategyParameters), b.(*api.StrategyParameters), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.StrategyParameters)(nil), (*StrategyParameters)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(a.(*api.StrategyParameters), b.(*StrategyParameters), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
// Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy is an autogenerated conversion function.
func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
return autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s)
}
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
return nil
}
// Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy is an autogenerated conversion function.
func Convert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
return autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in, out, s)
}
func autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Weight = in.Weight
out.Params = (*api.StrategyParameters)(unsafe.Pointer(in.Params))
return nil
}
// Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy is an autogenerated conversion function.
func Convert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in *DeschedulerStrategy, out *api.DeschedulerStrategy, s conversion.Scope) error {
return autoConvert_v1alpha1_DeschedulerStrategy_To_api_DeschedulerStrategy(in, out, s)
}
func autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Weight = in.Weight
out.Params = (*StrategyParameters)(unsafe.Pointer(in.Params))
return nil
}
// Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy is an autogenerated conversion function.
func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.DeschedulerStrategy, out *DeschedulerStrategy, s conversion.Scope) error {
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
}
func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
}
func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
}
func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
return nil
}
// Convert_v1alpha1_Namespaces_To_api_Namespaces is an autogenerated conversion function.
func Convert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
return autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in, out, s)
}
func autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
return nil
}
// Convert_api_Namespaces_To_v1alpha1_Namespaces is an autogenerated conversion function.
func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Namespaces, s conversion.Scope) error {
return autoConvert_api_Namespaces_To_v1alpha1_Namespaces(in, out, s)
}
func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
return nil
}
// Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds is an autogenerated conversion function.
func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
return autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in, out, s)
}
func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
out.NumberOfNodes = in.NumberOfNodes
return nil
}
// Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds is an autogenerated conversion function.
func Convert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
return autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in, out, s)
}
func autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
return nil
}
// Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime is an autogenerated conversion function.
func Convert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in *PodLifeTime, out *api.PodLifeTime, s conversion.Scope) error {
return autoConvert_v1alpha1_PodLifeTime_To_api_PodLifeTime(in, out, s)
}
func autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
out.MaxPodLifeTimeSeconds = (*uint)(unsafe.Pointer(in.MaxPodLifeTimeSeconds))
out.PodStatusPhases = *(*[]string)(unsafe.Pointer(&in.PodStatusPhases))
return nil
}
// Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime is an autogenerated conversion function.
func Convert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in *api.PodLifeTime, out *PodLifeTime, s conversion.Scope) error {
return autoConvert_api_PodLifeTime_To_v1alpha1_PodLifeTime(in, out, s)
}
func autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
out.PodRestartThreshold = in.PodRestartThreshold
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in *PodsHavingTooManyRestarts, out *api.PodsHavingTooManyRestarts, s conversion.Scope) error {
return autoConvert_v1alpha1_PodsHavingTooManyRestarts_To_api_PodsHavingTooManyRestarts(in, out, s)
}
func autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
out.PodRestartThreshold = in.PodRestartThreshold
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts is an autogenerated conversion function.
func Convert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in *api.PodsHavingTooManyRestarts, out *PodsHavingTooManyRestarts, s conversion.Scope) error {
return autoConvert_api_PodsHavingTooManyRestarts_To_v1alpha1_PodsHavingTooManyRestarts(in, out, s)
}
func autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
return nil
}
// Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates is an autogenerated conversion function.
func Convert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in *RemoveDuplicates, out *api.RemoveDuplicates, s conversion.Scope) error {
return autoConvert_v1alpha1_RemoveDuplicates_To_api_RemoveDuplicates(in, out, s)
}
func autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
return nil
}
// Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates is an autogenerated conversion function.
func Convert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in *api.RemoveDuplicates, out *RemoveDuplicates, s conversion.Scope) error {
return autoConvert_api_RemoveDuplicates_To_v1alpha1_RemoveDuplicates(in, out, s)
}
func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
out.NodeResourceUtilizationThresholds = (*api.NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.NodeFit = in.NodeFit
return nil
}
// Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters is an autogenerated conversion function.
func Convert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *StrategyParameters, out *api.StrategyParameters, s conversion.Scope) error {
return autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in, out, s)
}
func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
out.NodeResourceUtilizationThresholds = (*NodeResourceUtilizationThresholds)(unsafe.Pointer(in.NodeResourceUtilizationThresholds))
out.NodeAffinityType = *(*[]string)(unsafe.Pointer(&in.NodeAffinityType))
out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
out.NodeFit = in.NodeFit
return nil
}
// Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters is an autogenerated conversion function.
func Convert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.StrategyParameters, out *StrategyParameters, s conversion.Scope) error {
return autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in, out, s)
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -64,12 +64,12 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
-*out = new(uint)
+*out = new(int)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
-*out = new(uint)
+*out = new(int)
**out = **in
}
return
@@ -209,11 +209,6 @@ func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = new(uint)
**out = **in
}
-if in.States != nil {
-in, out := &in.States, &out.States
-*out = make([]string, len(*in))
-copy(*out, *in)
-}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
@@ -361,11 +356,6 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
-if in.ExcludedTaints != nil {
-in, out := &in.ExcludedTaints, &out.ExcludedTaints
-*out = make([]string, len(*in))
-copy(*out, *in)
-}
return
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
-Copyright 2024 The Kubernetes Authors.
+Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -1,136 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
)
var (
// pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
// used for defaulting/converting typed PluginConfig Args.
// Access via GetPluginArgConversionScheme().
pluginArgConversionScheme *runtime.Scheme
initPluginArgConversionScheme sync.Once
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
)
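// GetPluginArgConversionScheme returns the scheme used to default and convert typed plugin args, building it on first use.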
func GetPluginArgConversionScheme() *runtime.Scheme {
initPluginArgConversionScheme.Do(func() {
// set up the scheme used for plugin arg conversion
pluginArgConversionScheme = runtime.NewScheme()
utilruntime.Must(AddToScheme(pluginArgConversionScheme))
utilruntime.Must(api.AddToScheme(pluginArgConversionScheme))
})
return pluginArgConversionScheme
}
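// Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy runs the generated conversion and then converts each profile's plugin args into their internal types.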
func Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
if err := autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s); err != nil {
return err
}
return convertToInternalPluginConfigArgs(out)
}
// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal
// types using a scheme, after applying defaults.
func convertToInternalPluginConfigArgs(out *api.DeschedulerPolicy) error {
scheme := GetPluginArgConversionScheme()
for i := range out.Profiles {
prof := &out.Profiles[i]
for j := range prof.PluginConfigs {
args := prof.PluginConfigs[j].Args
if args == nil {
continue
}
if _, isUnknown := args.(*runtime.Unknown); isUnknown {
continue
}
internalArgs, err := scheme.ConvertToVersion(args, api.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("converting .Profiles[%d].PluginConfigs[%d].Args into internal type: %w", i, j, err)
}
prof.PluginConfigs[j].Args = internalArgs
}
}
return nil
}
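// Convert_v1alpha2_PluginConfig_To_api_PluginConfig decodes raw plugin args into the typed args instance registered for the plugin; unregistered plugins fall back to a generic RawExtension-to-Object conversion.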
func Convert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if _, ok := pluginregistry.PluginRegistry[in.Name]; ok {
out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance.DeepCopyObject()
if in.Args.Raw != nil {
_, _, err := Codecs.UniversalDecoder().Decode(in.Args.Raw, nil, out.Args)
if err != nil {
return err
}
} else if in.Args.Object != nil {
out.Args = in.Args.Object
}
} else {
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
return err
}
}
return nil
}
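// Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy runs the generated conversion and then converts each profile's plugin args back into their versioned types.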
func Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
if err := autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in, out, s); err != nil {
return err
}
return convertToExternalPluginConfigArgs(out)
}
// convertToExternalPluginConfigArgs converts PluginConfig#Args into
// external (versioned) types using a scheme.
func convertToExternalPluginConfigArgs(out *DeschedulerPolicy) error {
scheme := GetPluginArgConversionScheme()
for i := range out.Profiles {
for j := range out.Profiles[i].PluginConfigs {
args := out.Profiles[i].PluginConfigs[j].Args
if args.Object == nil {
continue
}
if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown {
continue
}
externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion)
if err != nil {
return err
}
out.Profiles[i].PluginConfigs[j].Args.Object = externalArgs
}
}
return nil
}

View File

@@ -1,23 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import "k8s.io/apimachinery/pkg/runtime"
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}

View File

@@ -1,24 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
// +k8s:defaulter-gen=TypeMeta
// Package v1alpha2 is the v1alpha2 version of the descheduler API
// +groupName=descheduler
package v1alpha2 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha2"

View File

@@ -1,26 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import "sort"
func SortDeschedulerProfileByName(profiles []DeschedulerProfile) []DeschedulerProfile {
sort.Slice(profiles, func(i, j int) bool {
return profiles[i].Name < profiles[j].Name
})
return profiles
}

View File

@@ -1,65 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type DeschedulerPolicy struct {
metav1.TypeMeta `json:",inline"`
// Profiles is the list of descheduler profiles to run.
Profiles []DeschedulerProfile `json:"profiles,omitempty"`
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// MaxNoOfPodsToEvictPerNode restricts the maximum number of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts the maximum number of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type DeschedulerProfile struct {
Name string `json:"name"`
PluginConfigs []PluginConfig `json:"pluginConfig"`
Plugins Plugins `json:"plugins"`
}
type Plugins struct {
PreSort PluginSet `json:"presort"`
Sort PluginSet `json:"sort"`
Deschedule PluginSet `json:"deschedule"`
Balance PluginSet `json:"balance"`
Filter PluginSet `json:"filter"`
PreEvictionFilter PluginSet `json:"preevictionfilter"`
}
type PluginConfig struct {
Name string `json:"name"`
Args runtime.RawExtension `json:"args"`
}
type PluginSet struct {
Enabled []string `json:"enabled"`
Disabled []string `json:"disabled"`
}

View File

@@ -1,271 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha2
import (
unsafe "unsafe"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
api "sigs.k8s.io/descheduler/pkg/api"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.DeschedulerProfile)(nil), (*DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(a.(*api.DeschedulerProfile), b.(*DeschedulerProfile), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*PluginSet)(nil), (*api.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PluginSet_To_api_PluginSet(a.(*PluginSet), b.(*api.PluginSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.PluginSet)(nil), (*PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_PluginSet_To_v1alpha2_PluginSet(a.(*api.PluginSet), b.(*PluginSet), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Plugins)(nil), (*api.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Plugins_To_api_Plugins(a.(*Plugins), b.(*api.Plugins), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.Plugins)(nil), (*Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Plugins_To_v1alpha2_Plugins(a.(*api.Plugins), b.(*Plugins), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*PluginConfig)(nil), (*api.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_PluginConfig_To_api_PluginConfig(a.(*PluginConfig), b.(*api.PluginConfig), scope)
}); err != nil {
return err
}
return nil
}
func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]api.DeschedulerProfile, len(*in))
for i := range *in {
if err := Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
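// Note (editorial): the unsafe.Pointer casts below are safe only because the
// versioned and internal field types share identical memory layouts, which
// lets conversion-gen reinterpret the pointers instead of copying field by field.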
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
return nil
}
func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
if err := Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
return nil
}
func autoConvert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in *DeschedulerProfile, out *api.DeschedulerProfile, s conversion.Scope) error {
out.Name = in.Name
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]api.PluginConfig, len(*in))
for i := range *in {
if err := Convert_v1alpha2_PluginConfig_To_api_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfigs = nil
}
if err := Convert_v1alpha2_Plugins_To_api_Plugins(&in.Plugins, &out.Plugins, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile is an autogenerated conversion function.
func Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in *DeschedulerProfile, out *api.DeschedulerProfile, s conversion.Scope) error {
return autoConvert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in, out, s)
}
func autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.DeschedulerProfile, out *DeschedulerProfile, s conversion.Scope) error {
out.Name = in.Name
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
if err := Convert_api_PluginConfig_To_v1alpha2_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfigs = nil
}
if err := Convert_api_Plugins_To_v1alpha2_Plugins(&in.Plugins, &out.Plugins, s); err != nil {
return err
}
return nil
}
// Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile is an autogenerated conversion function.
func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.DeschedulerProfile, out *DeschedulerProfile, s conversion.Scope) error {
return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
}
func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}
func autoConvert_api_PluginConfig_To_v1alpha2_PluginConfig(in *api.PluginConfig, out *PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}
// Convert_api_PluginConfig_To_v1alpha2_PluginConfig is an autogenerated conversion function.
func Convert_api_PluginConfig_To_v1alpha2_PluginConfig(in *api.PluginConfig, out *PluginConfig, s conversion.Scope) error {
return autoConvert_api_PluginConfig_To_v1alpha2_PluginConfig(in, out, s)
}
func autoConvert_v1alpha2_PluginSet_To_api_PluginSet(in *PluginSet, out *api.PluginSet, s conversion.Scope) error {
out.Enabled = *(*[]string)(unsafe.Pointer(&in.Enabled))
out.Disabled = *(*[]string)(unsafe.Pointer(&in.Disabled))
return nil
}
// Convert_v1alpha2_PluginSet_To_api_PluginSet is an autogenerated conversion function.
func Convert_v1alpha2_PluginSet_To_api_PluginSet(in *PluginSet, out *api.PluginSet, s conversion.Scope) error {
return autoConvert_v1alpha2_PluginSet_To_api_PluginSet(in, out, s)
}
func autoConvert_api_PluginSet_To_v1alpha2_PluginSet(in *api.PluginSet, out *PluginSet, s conversion.Scope) error {
out.Enabled = *(*[]string)(unsafe.Pointer(&in.Enabled))
out.Disabled = *(*[]string)(unsafe.Pointer(&in.Disabled))
return nil
}
// Convert_api_PluginSet_To_v1alpha2_PluginSet is an autogenerated conversion function.
func Convert_api_PluginSet_To_v1alpha2_PluginSet(in *api.PluginSet, out *PluginSet, s conversion.Scope) error {
return autoConvert_api_PluginSet_To_v1alpha2_PluginSet(in, out, s)
}
func autoConvert_v1alpha2_Plugins_To_api_Plugins(in *Plugins, out *api.Plugins, s conversion.Scope) error {
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.PreSort, &out.PreSort, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Sort, &out.Sort, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Deschedule, &out.Deschedule, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Balance, &out.Balance, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.PreEvictionFilter, &out.PreEvictionFilter, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_Plugins_To_api_Plugins is an autogenerated conversion function.
func Convert_v1alpha2_Plugins_To_api_Plugins(in *Plugins, out *api.Plugins, s conversion.Scope) error {
return autoConvert_v1alpha2_Plugins_To_api_Plugins(in, out, s)
}
func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.PreSort, &out.PreSort, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Sort, &out.Sort, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Deschedule, &out.Deschedule, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Balance, &out.Balance, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.PreEvictionFilter, &out.PreEvictionFilter, s); err != nil {
return err
}
return nil
}
// Convert_api_Plugins_To_v1alpha2_Plugins is an autogenerated conversion function.
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
}
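Editor's note: a minimal, hedged sketch of how the conversions registered above are typically exercised; it assumes a caller compiled into this v1alpha2 package, and everything not defined in the generated file is illustrative.
// Editorial sketch: exercising the registered conversions via a runtime.Scheme.
scheme := runtime.NewScheme()
if err := RegisterConversions(scheme); err != nil {
	panic(err)
}
in := &DeschedulerPolicy{}      // versioned (v1alpha2) form
out := &api.DeschedulerPolicy{} // internal form
// Convert dispatches to the registered
// Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy.
if err := scheme.Convert(in, out, nil); err != nil {
	panic(err)
}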

View File

@@ -1,162 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerProfile) DeepCopyInto(out *DeschedulerProfile) {
*out = *in
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerProfile.
func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
if in == nil {
return nil
}
out := new(DeschedulerProfile)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
*out = *in
in.Args.DeepCopyInto(&out.Args)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
func (in *PluginConfig) DeepCopy() *PluginConfig {
if in == nil {
return nil
}
out := new(PluginConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginSet) DeepCopyInto(out *PluginSet) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
func (in *PluginSet) DeepCopy() *PluginSet {
if in == nil {
return nil
}
out := new(PluginSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugins) DeepCopyInto(out *Plugins) {
*out = *in
in.PreSort.DeepCopyInto(&out.PreSort)
in.Sort.DeepCopyInto(&out.Sort)
in.Deschedule.DeepCopyInto(&out.Deschedule)
in.Balance.DeepCopyInto(&out.Balance)
in.Filter.DeepCopyInto(&out.Filter)
in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
func (in *Plugins) DeepCopy() *Plugins {
if in == nil {
return nil
}
out := new(Plugins)
in.DeepCopyInto(out)
return out
}
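Editor's note: the point of these generated deepcopy functions is mutation isolation; a minimal sketch, assuming it runs inside this package.
// Editorial sketch: mutating the copy must not touch the original.
sel := "role=worker"
orig := &DeschedulerPolicy{NodeSelector: &sel}
cp := orig.DeepCopy()
*cp.NodeSelector = "role=infra" // mutate the copy only
// *orig.NodeSelector still reads "role=worker": the pointer was detached.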

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@ limitations under the License.
package api
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -29,11 +30,11 @@ import (
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
if in.Strategies != nil {
in, out := &in.Strategies, &out.Strategies
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
if in.NodeSelector != nil {
@@ -41,6 +42,26 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
**out = **in
}
if in.EvictSystemCriticalPods != nil {
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
*out = new(bool)
**out = **in
}
if in.IgnorePVCPods != nil {
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
*out = new(bool)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
@@ -73,25 +94,53 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerProfile) DeepCopyInto(out *DeschedulerProfile) {
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
*out = *in
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
if in.Params != nil {
in, out := &in.Params, &out.Params
*out = new(StrategyParameters)
(*in).DeepCopyInto(*out)
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerProfile.
func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
if in == nil {
return nil
}
out := new(DeschedulerProfile)
out := new(DeschedulerStrategy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
@@ -123,89 +172,94 @@ func (in *Namespaces) DeepCopy() *Namespaces {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
*out = *in
if in.Args != nil {
out.Args = in.Args.DeepCopyObject()
if in.Thresholds != nil {
in, out := &in.Thresholds, &out.Thresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TargetThresholds != nil {
in, out := &in.TargetThresholds, &out.TargetThresholds
*out = make(ResourceThresholds, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
func (in *PluginConfig) DeepCopy() *PluginConfig {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
if in == nil {
return nil
}
out := new(PluginConfig)
out := new(NodeResourceUtilizationThresholds)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginSet) DeepCopyInto(out *PluginSet) {
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
func (in *PluginSet) DeepCopy() *PluginSet {
if in == nil {
return nil
}
out := new(PluginSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugins) DeepCopyInto(out *Plugins) {
*out = *in
in.PreSort.DeepCopyInto(&out.PreSort)
in.Sort.DeepCopyInto(&out.Sort)
in.Deschedule.DeepCopyInto(&out.Deschedule)
in.Balance.DeepCopyInto(&out.Balance)
in.Filter.DeepCopyInto(&out.Filter)
in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
func (in *Plugins) DeepCopy() *Plugins {
if in == nil {
return nil
}
out := new(Plugins)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityThreshold) DeepCopyInto(out *PriorityThreshold) {
*out = *in
if in.Value != nil {
in, out := &in.Value, &out.Value
*out = new(int32)
if in.MaxPodLifeTimeSeconds != nil {
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
*out = new(uint)
**out = **in
}
if in.PodStatusPhases != nil {
in, out := &in.PodStatusPhases, &out.PodStatusPhases
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityThreshold.
func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
if in == nil {
return nil
}
out := new(PriorityThreshold)
out := new(PodLifeTime)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
if in == nil {
return nil
}
out := new(PodsHavingTooManyRestarts)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
if in == nil {
return nil
}
out := new(RemoveDuplicates)
in.DeepCopyInto(out)
return out
}
@@ -231,3 +285,86 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in StrategyList) DeepCopyInto(out *StrategyList) {
{
in := &in
*out = make(StrategyList, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
func (in StrategyList) DeepCopy() StrategyList {
if in == nil {
return nil
}
out := new(StrategyList)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = *in
if in.NodeResourceUtilizationThresholds != nil {
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
*out = new(NodeResourceUtilizationThresholds)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinityType != nil {
in, out := &in.NodeAffinityType, &out.NodeAffinityType
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PodsHavingTooManyRestarts != nil {
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
*out = new(PodsHavingTooManyRestarts)
**out = **in
}
if in.PodLifeTime != nil {
in, out := &in.PodLifeTime, &out.PodLifeTime
*out = new(PodLifeTime)
(*in).DeepCopyInto(*out)
}
if in.RemoveDuplicates != nil {
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)
(*in).DeepCopyInto(*out)
}
if in.ThresholdPriority != nil {
in, out := &in.ThresholdPriority, &out.ThresholdPriority
*out = new(int32)
**out = **in
}
if in.LabelSelector != nil {
in, out := &in.LabelSelector, &out.LabelSelector
*out = new(v1.LabelSelector)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
if in == nil {
return nil
}
out := new(StrategyParameters)
in.DeepCopyInto(out)
return out
}
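Editor's note: a hedged sketch of how a v1alpha1-era policy maps onto the types whose deepcopy functions appear above; field names follow the diff, and all values are illustrative.
// Editorial sketch: a strategy map keyed by name, with per-strategy params.
maxAge := uint(86400) // one day
policy := api.DeschedulerPolicy{
	Strategies: api.StrategyList{
		"PodLifeTime": api.DeschedulerStrategy{
			Enabled: true,
			Params: &api.StrategyParameters{
				PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxAge},
			},
		},
	},
}
_ = policy.DeepCopy() // detached copy, safe to mutate independently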

View File

@@ -33,7 +33,6 @@ type DeschedulerConfiguration struct {
// KubeconfigFile is path to kubeconfig file with authorization and master
// location information.
// Deprecated: Use clientConnection.kubeConfig instead.
KubeconfigFile string
// PolicyConfigFile is the filepath to the descheduler policy configuration.
@@ -54,35 +53,7 @@ type DeschedulerConfiguration struct {
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool
// Tracing specifies the options for tracing.
Tracing TracingConfiguration
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration
// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration
}
type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, the NoopTraceProvider will be used.
CollectorEndpoint string
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, provider will start in insecure mode.
TransportCert string
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, the default namespace will be used.
ServiceNamespace string
// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
// not sample anything. Everything else is a percentage value.
SampleRate float64
// FallbackToNoOpProviderOnError can be set if you want your trace provider to fall back to the
// no-op provider when the configured endpoint-based provider cannot be set up.
FallbackToNoOpProviderOnError bool
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
Logging componentbaseconfig.LoggingConfiguration
}

View File

@@ -28,10 +28,8 @@ var (
)
// GroupName is the group name use in this package
const (
GroupName = "deschedulercomponentconfig"
GroupVersion = "v1alpha1"
)
const GroupName = "deschedulercomponentconfig"
const GroupVersion = "v1alpha1"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}

View File

@@ -19,8 +19,9 @@ package v1alpha1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -33,7 +34,6 @@ type DeschedulerConfiguration struct {
// KubeconfigFile is path to kubeconfig file with authorization and master
// location information.
// Deprecated: Use clientConnection.kubeConfig instead.
KubeconfigFile string `json:"kubeconfigFile"`
// PolicyConfigFile is the filepath to the descheduler policy configuration.
@@ -54,35 +54,7 @@ type DeschedulerConfiguration struct {
// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`
// Tracing is used to setup the required OTEL tracing configuration
Tracing TracingConfiguration `json:"tracing,omitempty"`
// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`
// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
}
type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, the NoopTraceProvider will be used.
CollectorEndpoint string `json:"collectorEndpoint"`
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, provider will start in insecure mode.
TransportCert string `json:"transportCert,omitempty"`
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string `json:"serviceName,omitempty"`
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, the default namespace will be used.
ServiceNamespace string `json:"serviceNamespace,omitempty"`
// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
// not sample anything. Everything else is a percentage value.
SampleRate float64 `json:"sampleRate"`
// FallbackToNoOpProviderOnError can be set if you want your trace provider to fall back to the
// no-op provider when the configured endpoint-based provider cannot be set up.
FallbackToNoOpProviderOnError bool `json:"fallbackToNoOpProviderOnError"`
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,16 +46,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*componentconfig.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(a.(*TracingConfiguration), b.(*componentconfig.TracingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*componentconfig.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*componentconfig.TracingConfiguration), b.(*TracingConfiguration), scope)
}); err != nil {
return err
}
return nil
}
@@ -68,11 +58,7 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.Logging = in.Logging
return nil
}
@@ -90,11 +76,7 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.Logging = in.Logging
return nil
}
@@ -102,33 +84,3 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
}
func autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}
// Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in, out, s)
}
func autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}
// Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
func Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,9 +29,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
return
}
@@ -52,19 +50,3 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2024 The Kubernetes Authors.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,9 +29,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
return
}
@@ -52,19 +50,3 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}

View File

@@ -20,42 +20,33 @@ import (
"fmt"
clientset "k8s.io/client-go/kubernetes"
componentbaseconfig "k8s.io/component-base/config"
// Ensure to load all auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
func CreateClient(kubeconfig string) (clientset.Interface, error) {
var cfg *rest.Config
if len(clientConnection.Kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
if len(kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to parse kubeconfig file: %v ", err)
return nil, fmt.Errorf("Failed to parse kubeconfig file: %v ", err)
}
cfg, err = clientcmd.BuildConfigFromFlags(master, clientConnection.Kubeconfig)
cfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
return nil, fmt.Errorf("unable to build config: %v", err)
return nil, fmt.Errorf("Unable to build config: %v", err)
}
} else {
var err error
cfg, err = rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("unable to build in cluster config: %v", err)
return nil, fmt.Errorf("Unable to build in cluster config: %v", err)
}
}
cfg.Burst = int(clientConnection.Burst)
cfg.QPS = clientConnection.QPS
if len(userAgt) != 0 {
cfg = rest.AddUserAgent(cfg, userAgt)
}
return clientset.NewForConfig(cfg)
}
@@ -67,11 +58,11 @@ func GetMasterFromKubeconfig(filename string) (string, error) {
context, ok := config.Contexts[config.CurrentContext]
if !ok {
return "", fmt.Errorf("failed to get master address from kubeconfig")
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
if val, ok := config.Clusters[context.Cluster]; ok {
return val.Server, nil
}
return "", fmt.Errorf("failed to get master address from kubeconfig")
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
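Editor's note: usage under the release-1.23 signature restored above, where CreateClient takes only a kubeconfig path; a hedged sketch whose surroundings (context, metav1, fmt imports) are illustrative.
// Editorial sketch: an empty path falls back to the in-cluster config branch.
cs, err := client.CreateClient("")
if err != nil {
	panic(err)
}
nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
	panic(err)
}
fmt.Printf("found %d nodes\n", len(nodes.Items))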

View File

@@ -18,222 +18,45 @@ package descheduler
import (
"context"
"errors"
"fmt"
"math"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"k8s.io/client-go/discovery"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/events"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
listersv1 "k8s.io/client-go/listers/core/v1"
schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
"sigs.k8s.io/descheduler/pkg/tracing"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/pkg/version"
corev1informers "k8s.io/client-go/informers/core/v1"
schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
)
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
type profileRunner struct {
name string
descheduleEPs, balanceEPs eprunner
}
type descheduler struct {
rs *options.DeschedulerServer
podLister listersv1.PodLister
nodeLister listersv1.NodeLister
namespaceLister listersv1.NamespaceLister
priorityClassLister schedulingv1.PriorityClassLister
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
sharedInformerFactory informers.SharedInformerFactory
evictionPolicyGroupVersion string
deschedulerPolicy *api.DeschedulerPolicy
eventRecorder events.EventRecorder
}
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
}
return &descheduler{
rs: rs,
podLister: podLister,
nodeLister: nodeLister,
namespaceLister: namespaceLister,
priorityClassLister: priorityClassLister,
getPodsAssignedToNode: getPodsAssignedToNode,
sharedInformerFactory: sharedInformerFactory,
evictionPolicyGroupVersion: evictionPolicyGroupVersion,
deschedulerPolicy: deschedulerPolicy,
eventRecorder: eventRecorder,
}, nil
}
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
defer span.End()
defer func(loopStartDuration time.Time) {
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
}(time.Now())
// if there are still fewer than two ready nodes, error out
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
return fmt.Errorf("the cluster size is 0 or 1")
}
var client clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if d.rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(d.rs.Client, d.podLister, d.nodeLister, d.namespaceLister, d.priorityClassLister)
if err != nil {
return err
}
// create a new instance of the shared informer factor from the cached client
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
// register the pod informer, otherwise it will never be started
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
client = fakeClient
d.sharedInformerFactory = fakeSharedInformerFactory
} else {
client = d.rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
client,
d.evictionPolicyGroupVersion,
d.rs.DryRun,
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
d.deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
!d.rs.DisableMetrics,
d.eventRecorder,
)
d.runProfiles(ctx, client, nodes, podEvictor)
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
return nil
}
// runProfiles runs all the deschedule plugins of all profiles and
// later runs through all balance plugins of all profiles. (All Balance plugins should come after all Deschedule plugins)
// see https://github.com/kubernetes-sigs/descheduler/issues/979
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
defer span.End()
var profileRunners []profileRunner
for _, profile := range d.deschedulerPolicy.Profiles {
currProfile, err := frameworkprofile.NewProfile(
profile,
pluginregistry.PluginRegistry,
frameworkprofile.WithClientSet(client),
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
frameworkprofile.WithPodEvictor(podEvictor),
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
)
if err != nil {
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
continue
}
profileRunners = append(profileRunners, profileRunner{profile.Name, currProfile.RunDeschedulePlugins, currProfile.RunBalancePlugins})
}
for _, profileR := range profileRunners {
// First deschedule
status := profileR.descheduleEPs(ctx, nodes)
if status != nil && status.Err != nil {
span.AddEvent("failed to perform deschedule operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.DescheduleOperation)))
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profileR.name)
continue
}
}
for _, profileR := range profileRunners {
// Balance Later
status := profileR.balanceEPs(ctx, nodes)
if status != nil && status.Err != nil {
span.AddEvent("failed to perform balance operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.BalanceOperation)))
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profileR.name)
continue
}
}
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "Run")
defer span.End()
metrics.Register()
clientConnection := rs.ClientConnection
if rs.KubeconfigFile != "" && clientConnection.Kubeconfig == "" {
clientConnection.Kubeconfig = rs.KubeconfigFile
}
rsclient, eventClient, err := createClients(clientConnection)
rsclient, err := client.CreateClient(rs.KubeconfigFile)
if err != nil {
return err
}
rs.Client = rsclient
rs.EventClient = eventClient
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginregistry.PluginRegistry)
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile)
if err != nil {
return err
}
@@ -241,76 +64,22 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return fmt.Errorf("deschedulerPolicy is nil")
}
// Add k8s compatibility warnings to logs
if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
klog.Warning(err.Error())
}
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
return err
}
runFn := func() error {
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
span.AddEvent("Validation Failure", trace.WithAttributes(attribute.String("err", "leaderElection must be used with deschedulingInterval")))
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
}
if rs.LeaderElection.LeaderElect && rs.DryRun {
klog.V(1).Info("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
}
if rs.LeaderElection.LeaderElect && !rs.DryRun {
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
span.AddEvent("Leader Election Failure", trace.WithAttributes(attribute.String("err", err.Error())))
return fmt.Errorf("leaderElection: %w", err)
}
return nil
}
return runFn()
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}
func validateVersionCompatibility(discovery discovery.DiscoveryInterface, versionInfo version.Info) error {
serverVersionInfo, err := discovery.ServerVersion()
if err != nil {
return errors.New("failed to discover Kubernetes server version")
}
serverVersion, err := utilversion.ParseSemantic(serverVersionInfo.String())
if err != nil {
return errors.New("failed to parse Kubernetes server version")
}
deschedulerVersion, err := utilversion.ParseGeneric(versionInfo.GitVersion)
if err != nil {
return errors.New("failed to convert Descheduler minor version to float")
}
deschedulerMinor := float64(deschedulerVersion.Minor())
serverMinor := float64(serverVersion.Minor())
if math.Abs(deschedulerMinor-serverMinor) > 3 {
return fmt.Errorf(
"descheduler version %v may not be supported on your version of Kubernetes %v."+
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
deschedulerVersion.String(),
serverVersionInfo.String(),
)
}
return nil
}
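// Worked example (editorial): descheduler v0.26 against a v1.26 server gives
// |26-26| = 0 and passes; v0.22 against a v1.26 server gives |22-26| = 4,
// exceeding the allowed minor-version skew of 3 and producing the warning above.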
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
func cachedClient(
realClient clientset.Interface,
podLister listersv1.PodLister,
nodeLister listersv1.NodeLister,
namespaceLister listersv1.NamespaceLister,
priorityClassLister schedulingv1.PriorityClassLister,
podInformer corev1informers.PodInformer,
nodeInformer corev1informers.NodeInformer,
namespaceInformer corev1informers.NamespaceInformer,
priorityClassInformer schedulingv1informers.PriorityClassInformer,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
@@ -334,7 +103,7 @@ func cachedClient(
})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podLister.List(labels.Everything())
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
}
@@ -345,7 +114,7 @@ func cachedClient(
}
}
nodes, err := nodeLister.List(labels.Everything())
nodes, err := nodeInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
@@ -356,18 +125,18 @@ func cachedClient(
}
}
namespaces, err := namespaceLister.List(labels.Everything())
namespaces, err := namespaceInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy namespace: %v", err)
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
priorityClasses, err := priorityClassLister.List(labels.Everything())
priorityClasses, err := priorityClassInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
@@ -382,55 +151,142 @@ func cachedClient(
}
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
var span trace.Span
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
defer span.End()
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// create the informers
namespaceInformer.Informer()
priorityClassInformer.Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
strategyFuncs := map[api.StrategyName]strategyFunction{
"RemoveDuplicates": strategies.RemoveDuplicatePods,
"LowNodeUtilization": nodeutilization.LowNodeUtilization,
"HighNodeUtilization": nodeutilization.HighNodeUtilization,
"RemovePodsViolatingInterPodAntiAffinity": strategies.RemovePodsViolatingInterPodAntiAffinity,
"RemovePodsViolatingNodeAffinity": strategies.RemovePodsViolatingNodeAffinity,
"RemovePodsViolatingNodeTaints": strategies.RemovePodsViolatingNodeTaints,
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
"PodLifeTime": strategies.PodLifeTime,
"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
"RemoveFailedPods": strategies.RemoveFailedPods,
}
var nodeSelector string
if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector
}
var eventClient clientset.Interface
if rs.DryRun {
eventClient = fakeclientset.NewSimpleClientset()
} else {
eventClient = rs.Client
var evictLocalStoragePods bool
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
defer eventBroadcaster.Shutdown()
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory)
if err != nil {
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
return err
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
if evictSystemCriticalPods {
klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
}
}
ignorePvcPods := false
if deschedulerPolicy.IgnorePVCPods != nil {
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
wait.NonSlidingUntil(func() {
// A separate context is created here intentionally to avoid nesting the spans via context.
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
defer sSpan.End()
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, nodeLister, nodeSelector)
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
if err != nil {
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
cancel()
return
}
err = descheduler.runDeschedulerLoop(sCtx, nodes)
if err != nil {
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
klog.Error(err)
if len(nodes) <= 1 {
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
cancel()
return
}
var podEvictorClient clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
if err != nil {
klog.Error(err)
return
}
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
podEvictorClient = fakeClient
} else {
podEvictorClient = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
podEvictorClient,
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
nodes,
evictLocalStoragePods,
evictSystemCriticalPods,
ignorePvcPods,
evictBarePods,
!rs.DisableMetrics,
)
for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled {
f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)
}
}
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
// If no descheduling interval was specified, cancel the context to end the wait.NonSlidingUntil loop after one iteration
if rs.DeschedulingInterval.Seconds() == 0 {
cancel()
}
}, rs.DeschedulingInterval, ctx.Done())
@@ -439,26 +295,3 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
return nil
}
func GetPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.PluginConfig, int) {
for idx, pluginConfig := range pluginConfigs {
if pluginConfig.Name == pluginName {
return &pluginConfig, idx
}
}
return nil, 0
}
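// GetPluginConfig returns (nil, 0) when the plugin is absent, and 0 is also a
// valid index, so callers must check the returned pointer rather than the
// index. A minimal sketch of its use (profile is a hypothetical
// api.DeschedulerProfile):
//
//	if cfg, idx := GetPluginConfig("DefaultEvictor", profile.PluginConfigs); cfg != nil {
//		klog.V(4).InfoS("Found plugin config", "plugin", cfg.Name, "index", idx)
//	}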
func createClients(clientConnection componentbaseconfig.ClientConnectionConfiguration) (clientset.Interface, clientset.Interface, error) {
kClient, err := client.CreateClient(clientConnection, "descheduler")
if err != nil {
return nil, nil, err
}
eventClient, err := client.CreateClient(clientConnection, "")
if err != nil {
return nil, nil, err
}
return kClient, eventClient, nil
}
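// The first client carries the "descheduler" user agent for regular API
// traffic, while the event client is created without a dedicated user agent.
// A minimal sketch of wiring it up (the kubeconfig path is assumed):
//
//	cc := componentbaseconfig.ClientConnectionConfiguration{Kubeconfig: "/etc/kubernetes/descheduler.kubeconfig"}
//	kClient, eventClient, err := createClients(cc)
//	if err != nil {
//		return fmt.Errorf("unable to create clients: %v", err)
//	}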


@@ -3,50 +3,20 @@ package descheduler
import (
"context"
"fmt"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
apiversion "k8s.io/apimachinery/pkg/version"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/apimachinery/pkg/util/wait"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
"sigs.k8s.io/descheduler/test"
)
// scope contains information about an ongoing conversion.
type scope struct {
converter *conversion.Converter
meta *conversion.Meta
}
// Convert continues a conversion.
func (s scope) Convert(src, dest interface{}) error {
return s.converter.Convert(src, dest, s.meta)
}
// Meta returns the meta object that was originally passed to Convert.
func (s scope) Meta() *conversion.Meta {
return s.meta
}
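// The scope stub above satisfies conversion.Scope just enough for these tests
// to drive the v1alpha1-to-internal policy conversion directly, mirroring
// what the tests below do:
//
//	internal := &api.DeschedulerPolicy{}
//	if err := v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internal, scope{}); err != nil {
//		t.Fatalf("conversion failed: %v", err)
//	}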
func TestTaintsUpdated(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
ctx := context.Background()
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -57,10 +27,9 @@ func TestTaintsUpdated(t *testing.T) {
}
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
dp := &v1alpha1.DeschedulerPolicy{
Strategies: v1alpha1.StrategyList{
"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{
Enabled: true,
},
},
}
@@ -71,7 +40,21 @@ func TestTaintsUpdated(t *testing.T) {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
internalDeschedulerPolicy := &api.DeschedulerPolicy{}
if err := v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope{}); err != nil {
t.Fatalf("Unable to convert v1alpha1 policy to internal: %v", err)
}
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1")
errChan <- err
}()
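// RunDeschedulerStrategies keeps looping every DeschedulingInterval (100ms
// here) until the context is canceled, so it keeps running in the background
// while the test mutates the cluster below.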
select {
case err := <-errChan:
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
// Wait for a few cycles and then verify that the only pod still exists
}
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@@ -94,87 +77,25 @@ func TestTaintsUpdated(t *testing.T) {
t.Fatalf("Unable to update node: %v\n", err)
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
if err := wait.PollImmediate(100*time.Millisecond, time.Second, func() (bool, error) {
// Getting an evicted pod results in a panic:
//pods, err := client.CoreV1().Pods(p1.Namespace).Get(p1.Name, metav1.GetOptions{})
// List is better, it does not panic.
// Though once the pod is evicted, List starts to error with "can't assign or convert v1beta1.Eviction into v1.Pod"
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err == nil {
if len(pods.Items) > 0 {
return false, nil
}
return true, nil
}
if strings.Contains(err.Error(), "can't assign or convert v1beta1.Eviction into v1.Pod") {
return true, nil
}
return false, nil
}); err != nil {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies: %v", err)
}
if len(evictedPods) != 1 {
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
}
}
func TestDuplicate(t *testing.T) {
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
p1.Namespace = "dev"
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
p2.Namespace = "dev"
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
p3.Namespace = "dev"
ownerRef1 := test.GetReplicaSetOwnerRefList()
p1.ObjectMeta.OwnerReferences = ownerRef1
p2.ObjectMeta.OwnerReferences = ownerRef1
p3.ObjectMeta.OwnerReferences = ownerRef1
client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
dp := &v1alpha1.DeschedulerPolicy{
Strategies: v1alpha1.StrategyList{
"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
Enabled: true,
},
},
}
rs, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Unable to list pods: %v", err)
}
if len(pods.Items) != 3 {
t.Errorf("Pods number should be 3 before evict")
}
var evictedPods []string
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
internalDeschedulerPolicy := &api.DeschedulerPolicy{}
scope := scope{}
err = v1alpha1.V1alpha1ToInternal(dp, pluginregistry.PluginRegistry, internalDeschedulerPolicy, scope)
if err != nil {
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
}
if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
if len(evictedPods) == 0 {
t.Fatalf("Unable to evict any of the duplicate pods: %v\n", err)
}
}
@@ -183,9 +104,8 @@ func TestRootCancel(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{}, // no profiles needed for this test
}
rs, err := options.NewDeschedulerServer()
@@ -193,13 +113,12 @@ func TestRootCancel(t *testing.T) {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 100 * time.Millisecond
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
@@ -218,9 +137,8 @@ func TestRootCancelWithNoInterval(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
client := fakeclientset.NewSimpleClientset(n1, n2)
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
dp := &api.DeschedulerPolicy{
Profiles: []api.DeschedulerProfile{}, // no profiles needed for this test
}
rs, err := options.NewDeschedulerServer()
@@ -228,13 +146,12 @@ func TestRootCancelWithNoInterval(t *testing.T) {
t.Fatalf("Unable to initialize server: %v", err)
}
rs.Client = client
rs.EventClient = eventClient
rs.DeschedulingInterval = 0
errChan := make(chan error, 1)
defer close(errChan)
go func() {
err := RunDeschedulerStrategies(ctx, rs, dp, "v1")
errChan <- err
}()
cancel()
@@ -247,79 +164,3 @@ func TestRootCancelWithNoInterval(t *testing.T) {
t.Fatal("Root ctx should have canceled immediately")
}
}
func TestValidateVersionCompatibility(t *testing.T) {
type testCase struct {
name string
deschedulerVersion string
serverVersion string
expectError bool
}
testCases := []testCase{
{
name: "no error when descheduler minor equals the server minor",
deschedulerVersion: "v0.26",
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "no error when descheduler minor is 3 behind server minor",
deschedulerVersion: "0.23",
serverVersion: "v1.26.1",
expectError: false,
},
{
name: "no error when descheduler minor is 3 ahead of server minor",
deschedulerVersion: "v0.26",
serverVersion: "v1.23.1",
expectError: false,
},
{
name: "error when descheduler minor is 4 behind server minor",
deschedulerVersion: "v0.22",
serverVersion: "v1.26.1",
expectError: true,
},
{
name: "error when descheduler minor is 4 ahead of server minor",
deschedulerVersion: "v0.27",
serverVersion: "v1.23.1",
expectError: true,
},
{
name: "no error when using managed provider version",
deschedulerVersion: "v0.25",
serverVersion: "v1.25.12-eks-2d98532",
expectError: false,
},
}
client := fakeclientset.NewSimpleClientset()
fakeDiscovery, _ := client.Discovery().(*fakediscovery.FakeDiscovery)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
deschedulerVersion := deschedulerversion.Info{GitVersion: tc.deschedulerVersion}
err := validateVersionCompatibility(fakeDiscovery, deschedulerVersion)
hasError := err != nil
if tc.expectError != hasError {
t.Error("unexpected version compatibility behavior")
}
})
}
}
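// The table above encodes a skew tolerance of three minor versions in either
// direction. A sketch of the underlying comparison (an assumed shape; the
// real validateVersionCompatibility also has to parse both version strings):
//
//	func minorSkewOK(deschedulerMinor, serverMinor int) bool {
//		diff := deschedulerMinor - serverMinor
//		if diff < 0 {
//			diff = -diff
//		}
//		return diff <= 3
//	}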
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
return func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
*evictedPods = append(*evictedPods, eviction.GetName())
}
}
return false, nil, nil // fallback to the default reactor
}
}
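// Evictions surface through the fake client as "create" actions on the
// pods/eviction subresource, which is why the tests above register this
// reactor as:
//
//	var evictedPods []string
//	client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))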

Some files were not shown because too many files have changed in this diff.