Mirror of https://github.com/kubernetes-sigs/descheduler.git (synced 2026-01-26 05:14:13 +01:00)

Compare commits (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5f1b31fcfc | |
| | bd9dea9979 | |
.gitattributes (vendored): 6 changed lines
@@ -1,6 +0,0 @@
# Always check-out / check-in files with LF line endings.
* text=auto eol=lf

go.sum merge=union
**/zz_generated.*.go linguist-generated=true
vendor/** linguist-vendored
.github/workflows/helm.yaml (vendored): 35 changed lines
@@ -20,42 +20,34 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0

- name: Set up Helm
uses: azure/setup-helm@v4.2.0
uses: azure/setup-helm@v2.1
with:
version: v3.15.1
version: v3.9.2

- uses: actions/setup-python@v5.1.1
- uses: actions/setup-python@v3.1.2
with:
python-version: 3.12
python-version: 3.7

- uses: actions/setup-go@v5
- uses: actions/setup-go@v3
with:
go-version-file: 'go.mod'
go-version: '1.19.3'

- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
uses: helm/chart-testing-action@v2.2.1
with:
version: v3.11.0

- name: Install Helm Unit Test Plugin
run: |
helm plugin install https://github.com/helm-unittest/helm-unittest

- name: Run Helm Unit Tests
run: |
helm unittest charts/descheduler --strict -d
version: v3.7.0

- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config=.github/ci/ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
echo "::set-output name=changed::true"
fi

- name: Run chart-testing (lint)
@@ -68,3 +60,10 @@ jobs:
# helm-extra-set-args only available after ct 3.6.0
- name: Run chart-testing (install)
run: ct install --config=.github/ci/ct.yaml --helm-extra-set-args='--set=kind=Deployment'

- name: E2E after chart install
env:
KUBERNETES_VERSION: "v1.26.0"
KIND_E2E: true
SKIP_INSTALL: true
run: make test-e2e
.github/workflows/manifests.yaml (vendored): 44 changed lines
@@ -1,44 +0,0 @@
name: manifests

on:
pull_request:

jobs:
deploy:
strategy:
matrix:
k8s-version: ["v1.33.0"]
descheduler-version: ["v0.33.0"]
descheduler-api: ["v1alpha2"]
manifest: ["deployment"]
kind-version: ["v0.27.0"] # keep in sync with test/run-e2e-tests.sh
runs-on: ubuntu-latest
steps:
- name: Checkout Repo
uses: actions/checkout@v4
- name: Create kind cluster
uses: helm/kind-action@v1.12.0
with:
node_image: kindest/node:${{ matrix.k8s-version }}
kubectl_version: ${{ matrix.k8s-version }}
config: test/kind-config.yaml
version: ${{ matrix.kind-version }}
- uses: actions/setup-go@v5
with:
go-version-file: go.mod
cache: true
- name: Build image
run: |
VERSION="dev" make dev-image
docker tag descheduler:dev registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }}
- name: Kind load image
run: |
kind load docker-image registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }} --name chart-testing
- name: Create k8s manifests
run: |
kubectl create -f kubernetes/base/rbac.yaml
kubectl create -f test/manifests/${{ matrix.descheduler-api }}/configmap.yaml
kubectl create -f kubernetes/${{ matrix.manifest }}/${{ matrix.manifest }}.yaml
- name: Wait for ready condition
run: |
kubectl wait --for=condition=Available --timeout=60s ${{ matrix.manifest }} descheduler -n kube-system
.github/workflows/release.yaml (vendored): 9 changed lines
@@ -5,9 +5,6 @@ on:
branches:
- release-*

permissions:
contents: write # allow actions to update gh-pages branch

jobs:
release:
runs-on: ubuntu-latest
@@ -23,12 +20,12 @@ jobs:
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

- name: Install Helm
uses: azure/setup-helm@v4.2.0
uses: azure/setup-helm@v1
with:
version: v3.15.1
version: v3.4.0

- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
uses: helm/chart-releaser-action@v1.1.0
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
.github/workflows/security.yaml (vendored): 2 changed lines
@@ -22,7 +22,7 @@ jobs:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -1,5 +1,5 @@
run:
timeout: 5m
deadline: 2m

linters:
disable-all: true
@@ -15,4 +15,4 @@ linters-settings:
gofumpt:
extra-rules: true
goimports:
local-prefixes: sigs.k8s.io/descheduler
local-prefixes: sigs.k8s.io/descheduler
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.24.2
FROM golang:1.19.3

WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .
@@ -21,9 +21,7 @@ RUN VERSION=${VERSION} make build.$ARCH

FROM scratch

MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>

LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

USER 1000
@@ -13,7 +13,7 @@
# limitations under the License.
FROM scratch

MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
MAINTAINER Kubernetes SIG Scheduling <kubernetes-sig-scheduling@googlegroups.com>

USER 1000
Makefile: 24 changed lines
@@ -26,14 +26,12 @@ ARCHS = amd64 arm arm64

LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"

GOLANGCI_VERSION := v1.64.8
GOLANGCI_VERSION := v1.49.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)

GOFUMPT_VERSION := v0.7.0
GOFUMPT_VERSION := v0.4.0
HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)

GO_VERSION := $(shell (command -v jq > /dev/null && (go mod edit -json | jq -r .Go)) || (sed -En 's/^go (.*)$$/\1/p' go.mod))

# REGISTRY is the container registry to push
# into. The default is to push to the staging
# registry, not production.
@@ -45,10 +43,6 @@ IMAGE:=descheduler:$(VERSION)
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)

# CURRENT_DIR is the current dir where the Makefile exists
CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))


# TODO: upload binaries to GCS bucket
#
# In the future binaries can be uploaded to
@@ -106,7 +100,7 @@ clean:
rm -rf _output
rm -rf _tmp

verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-gen
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-toc verify-gen

verify-govet:
./hack/verify-govet.sh
@@ -120,8 +114,8 @@ verify-gofmt:
verify-vendor:
./hack/verify-vendor.sh

verify-docs:
./hack/verify-docs.sh
verify-toc:
./hack/verify-toc.sh

test-unit:
./test/run-unit-tests.sh
@@ -133,22 +127,18 @@ gen:
./hack/update-generated-conversions.sh
./hack/update-generated-deep-copies.sh
./hack/update-generated-defaulters.sh
./hack/update-docs.sh

gen-docker:
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:$(GO_VERSION) gen
./hack/update-toc.sh

verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh
./hack/verify-defaulters.sh
./hack/verify-docs.sh

lint:
ifndef HAS_GOLANGCI
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
./_output/bin/golangci-lint run -v
./_output/bin/golangci-lint run

fmt:
ifndef HAS_GOFUMPT
OWNERS: 4 changed lines
@@ -3,7 +3,6 @@ approvers:
- ingvagabund
- seanmalloy
- a7i
- knelasevero
reviewers:
- damemi
- seanmalloy
@@ -12,9 +11,6 @@ reviewers:
- a7i
- janeliul
- knelasevero
- jklaw90
- googs1025
- ricardomaraschini
emeritus_approvers:
- aveshagarwal
- k82cn
@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.32.0
appVersion: 0.32.0
version: 0.26.0
appVersion: 0.26.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes
@@ -13,4 +13,4 @@ sources:
- https://github.com/kubernetes-sigs/descheduler
maintainers:
- name: Kubernetes SIG Scheduling
email: sig-scheduling@kubernetes.io
email: kubernetes-sig-scheduling@googlegroups.com
@@ -11,7 +11,7 @@ helm install my-release --namespace kube-system descheduler/descheduler

## Introduction

This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview what changes descheduler would make without actually going forward with the changes, you can install descheduler in dry run mode by providing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.

## Prerequisites

@@ -52,11 +52,9 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
@@ -64,6 +62,7 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
@@ -76,7 +75,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
@@ -84,7 +82,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
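For context on the dry-run wording added to the chart README above, here is a minimal values sketch (hypothetical file name, keys taken from the chart's documented `cmdOptions` map) that does the same thing as the `--set` flag:

```yaml
# values-dry-run.yaml (hypothetical), passed with: helm install ... -f values-dry-run.yaml
cmdOptions:
  v: 3
  dry-run: true   # rendered by the chart as a --dry-run flag; descheduler only reports what it would evict
```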
@@ -1,7 +1,7 @@
Descheduler installed as a {{ .Values.kind }}.

{{- if eq .Values.kind "Deployment" }}
{{- if eq (.Values.replicas | int) 1 }}
{{- if eq .Values.replicas 1.0}}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
@@ -10,13 +10,3 @@ WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}
{{- if .Values.deschedulerPolicy }}
A DeschedulerPolicy has been applied for you. You can view the policy with:

kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml

If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.

deschedulerPolicy: {}
{{- end }}
@@ -24,14 +24,6 @@ If release name contains chart name it will be used as a full name.
{{- end -}}
{{- end -}}

{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
@@ -95,10 +87,8 @@ Leader Election
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- if .Values.leaderElection.resourceNamescape }}
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
{{- end -}}
{{- end }}
{{- end }}
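A sketch of the backwards-compatibility handling above (keys as documented in the chart's values.yaml): either spelling of the namespace key should end up as the same CLI flag.

```yaml
leaderElection:
  enabled: true
  resourceNamespace: kube-system     # preferred key
  # resourceNamescape: kube-system   # legacy misspelling, still honored by the helper above
# both forms render as: --leader-elect-resource-namespace=kube-system
```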
@@ -24,9 +24,6 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
@@ -36,13 +33,4 @@ rules:
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- end }}
{{- if and .Values.deschedulerPolicy }}
{{- range .Values.deschedulerPolicy.metricsProviders }}
{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
{{- end }}
{{- end }}
{{- end }}
{{- end -}}
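A hedged sketch of the values that would activate the extra metrics rules in the ClusterRole template above; the `metricsProviders` key is shown commented out in the chart's values.yaml:

```yaml
deschedulerPolicy:
  metricsProviders:
    - source: KubernetesMetrics   # matches the hasKey/eq check, so the chart adds
                                  # get/list on pods and nodes in the metrics.k8s.io group
```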
@@ -12,5 +12,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}
@@ -1,14 +1,12 @@
{{- if .Values.deschedulerPolicy }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{- end }}
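Roughly what the rendered `policy.yaml` header inside that ConfigMap looks like with the chart defaults (the older template hard-coded `descheduler/v1alpha1`); the profile body here is abbreviated and only illustrative:

```yaml
apiVersion: "descheduler/v1alpha2"   # taken from .Values.deschedulerPolicyAPIVersion
kind: "DeschedulerPolicy"
profiles:
  - name: default
    # remaining content comes verbatim from .Values.deschedulerPolicy via toYaml
```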
@@ -3,7 +3,7 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
@@ -15,15 +15,12 @@ spec:
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }}
{{- if ne .Values.successfulJobsHistoryLimit nil }}
{{- if .Values.successfulJobsHistoryLimit }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }}
{{- if ne .Values.failedJobsHistoryLimit nil }}
{{- if .Values.failedJobsHistoryLimit }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
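The switch from a plain truthiness check to `ne ... nil` above matters for explicit zero values: 0 is falsy in Go templates, so the old guard silently dropped it. A values sketch that only the newer guard renders:

```yaml
successfulJobsHistoryLimit: 0   # kept by the `ne ... nil` check; dropped by a plain `if`
failedJobsHistoryLimit: 1
```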
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
spec:
{{- if .Values.ttlSecondsAfterFinished }}
@@ -51,14 +48,6 @@ spec:
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
@@ -77,33 +66,31 @@ spec:
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 16 }}
- "/bin/descheduler"
args:
- --policy-config-file=/policy-dir/policy.yaml
- "--policy-config-file"
- "/policy-dir/policy.yaml"
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
ports:
{{- toYaml .Values.ports | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 16 }}
{{- end }}
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
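For reference, a sketch of how the newer one-token-per-flag rendering of `cmdOptions` in the template above behaves; the flag names are illustrative (`--v` and `--dry-run` both appear in the CLI docs later in this diff):

```yaml
cmdOptions:
  v: 3
  dry-run:        # a null value falls into the else branch and renders as a bare flag
# renders, approximately, into the container args as:
#   - --policy-config-file=/policy-dir/policy.yaml
#   - --dry-run
#   - --v=3
```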
@@ -3,11 +3,11 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if gt .Values.replicas 1.0}}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
@@ -31,54 +31,50 @@ spec:
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 12 }}
- "/bin/descheduler"
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.leaderElection.enabled }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
{{- end }}
ports:
{{- toYaml .Values.ports | nindent 12 }}
- containerPort: 10258
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
@@ -91,10 +87,6 @@ spec:
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
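A minimal values sketch for the Deployment path above; the template calls `fail` when more than one replica is requested without leader election, and `deschedulingInterval` is required in this mode:

```yaml
kind: Deployment
replicas: 2
leaderElection:
  enabled: true            # required once replicas > 1
deschedulingInterval: 5m   # required when descheduler runs as a Deployment
```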
@@ -6,15 +6,9 @@ metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
{{- end }}
ports:
- name: http-metrics
port: 10258
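A hedged sketch of enabling the headless metrics Service with the dual-stack knobs referenced above (value keys as documented in the chart's values.yaml):

```yaml
service:
  enabled: true
  ipFamilyPolicy: PreferDualStack   # SingleStack, PreferDualStack or RequireDualStack
  ipFamilies:
    - IPv4
    - IPv6
```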
@@ -3,7 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
@@ -7,14 +7,11 @@ metadata:
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ include "descheduler.namespace" . }}
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
charts/descheduler/tests/.gitignore (vendored): 1 changed line
@@ -1 +0,0 @@
__snapshot__
@@ -1,17 +0,0 @@
suite: Test Descheduler CronJob

templates:
- "*.yaml"

release:
name: descheduler

set:
kind: CronJob

tests:
- it: creates CronJob when kind is set
template: templates/cronjob.yaml
asserts:
- isKind:
of: CronJob
@@ -1,49 +0,0 @@
suite: Test Descheduler Deployment

templates:
- "*.yaml"

release:
name: descheduler

set:
kind: Deployment

tests:
- it: creates Deployment when kind is set
template: templates/deployment.yaml
asserts:
- isKind:
of: Deployment

- it: enables leader-election
set:
leaderElection:
enabled: true
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect=true

- it: support leader-election resourceNamespace
set:
leaderElection:
enabled: true
resourceNamespace: test
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=test

- it: support legacy leader-election resourceNamescape
set:
leaderElection:
enabled: true
resourceNamescape: typo
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=typo
@@ -18,34 +18,13 @@ resources:
requests:
cpu: 500m
memory: 256Mi
limits:
cpu: 500m
memory: 256Mi

ports:
- containerPort: 10258
protocol: TCP

securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000

# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000
# limits:
# cpu: 100m
# memory: 128Mi

nameOverride: ""
fullnameOverride: ""

# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""

# labels that'll be applied to all resources
commonLabels: {}

@@ -56,7 +35,6 @@ suspend: false
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# timeZone: Etc/UTC

# Required when running as a Deployment
deschedulingInterval: 5m
@@ -77,75 +55,51 @@ leaderElection: {}
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamespace: "kube-system"

command:
- "/bin/descheduler"
# resourceNamescape: "kube-system"

cmdOptions:
v: 3

# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

# deschedulerPolicy contains the policies the descheduler will execute.
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# metricsProviders:
# - source: KubernetesMetrics
# ignorePvcPods: true
# evictLocalStoragePods: true
# evictDaemonSetPods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
profiles:
- name: default
pluginConfig:
- name: DefaultEvictor
args:
ignorePvcPods: true
evictLocalStoragePods: true
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
podRestartThreshold: 100
includingInitContainers: true
- name: RemovePodsViolatingNodeAffinity
args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: LowNodeUtilization
args:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsHavingTooManyRestarts
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity
strategies:
RemoveDuplicates:
enabled: true
RemovePodsHavingTooManyRestarts:
enabled: true
params:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
RemovePodsViolatingNodeTaints:
enabled: true
RemovePodsViolatingNodeAffinity:
enabled: true
params:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
RemovePodsViolatingInterPodAntiAffinity:
enabled: true
RemovePodsViolatingTopologySpreadConstraint:
enabled: true
params:
includeSoftConstraints: false
LowNodeUtilization:
enabled: true
params:
nodeResourceUtilizationThresholds:
thresholds:
cpu: 20
memory: 20
pods: 20
targetThresholds:
cpu: 50
memory: 50
pods: 50

priorityClassName: system-cluster-critical

@@ -171,13 +125,6 @@ affinity: {}
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
# operator: 'Equal'
@@ -201,8 +148,6 @@ podAnnotations: {}

podLabels: {}

dnsConfig: {}

livenessProbe:
failureThreshold: 3
httpGet:
@@ -214,24 +159,11 @@ livenessProbe:

service:
enabled: false
# @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
#
ipFamilyPolicy: ""
# @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
# E.g.
# ipFamilies:
# - IPv6
# - IPv4
ipFamilies: []

serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true
@@ -18,30 +18,17 @@ limitations under the License.
package options

import (
"strings"
"time"

promapi "github.com/prometheus/client_golang/api"
"github.com/spf13/pflag"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiserver "k8s.io/apiserver/pkg/server"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"

restclient "k8s.io/client-go/rest"
cliflag "k8s.io/component-base/cli/flag"
componentbaseconfig "k8s.io/component-base/config"
componentbaseoptions "k8s.io/component-base/config/options"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"

"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
"sigs.k8s.io/descheduler/pkg/features"
"sigs.k8s.io/descheduler/pkg/tracing"
)

const (
@@ -52,18 +39,10 @@ const (
type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration

Client clientset.Interface
EventClient clientset.Interface
MetricsClient metricsclient.Interface
PrometheusClient promapi.Client
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
SecureServingInfo *apiserver.SecureServingInfo
DisableMetrics bool
EnableHTTP2 bool
// FeatureGates enabled by the user
FeatureGates map[string]bool
// DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
DefaultFeatureGates featuregate.FeatureGate
Client clientset.Interface
EventClient clientset.Interface
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
@@ -95,9 +74,7 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err
},
}
deschedulerscheme.Scheme.Default(&versionedCfg)
cfg := componentconfig.DeschedulerConfiguration{
Tracing: componentconfig.TracingConfiguration{},
}
cfg := componentconfig.DeschedulerConfiguration{}
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
return nil, err
}
@@ -106,46 +83,14 @@ func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, err

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.Logging.Format, "logging-format", "text", `Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning.`)
fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
fs.Float32Var(&rs.ClientConnection.QPS, "client-connection-qps", rs.ClientConnection.QPS, "QPS to use for interacting with kubernetes apiserver.")
fs.Int32Var(&rs.ClientConnection.Burst, "client-connection-burst", rs.ClientConnection.Burst, "Burst to use for interacting with kubernetes apiserver.")
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
"Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))

componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)

rs.SecureServing.AddFlags(fs)
}

func (rs *DeschedulerServer) Apply() error {
err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
if err != nil {
return err
}
rs.DefaultFeatureGates = features.DefaultMutableFeatureGate

// loopbackClientConfig is a config for a privileged loopback connection
var loopbackClientConfig *restclient.Config
var secureServing *apiserver.SecureServingInfo
if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return err
}

secureServing.DisableHTTP2 = !rs.EnableHTTP2
rs.SecureServingInfo = secureServing

return nil
}
@@ -20,22 +20,22 @@ package app
import (
"context"
"io"
"os"
"os/signal"
"syscall"

"github.com/spf13/cobra"
"k8s.io/apiserver/pkg/server/healthz"

"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/tracing"

"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/server/healthz"
"github.com/spf13/cobra"

apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs"
logsapi "k8s.io/component-base/logs/api/v1"
restclient "k8s.io/client-go/rest"
registry "k8s.io/component-base/logs/api/v1"
jsonLog "k8s.io/component-base/logs/json"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
@@ -48,78 +48,73 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
klog.ErrorS(err, "unable to initialize server")
}

featureGate := featuregate.NewFeatureGate()
logConfig := logsapi.NewLoggingConfiguration()

cmd := &cobra.Command{
Use: "descheduler",
Short: "descheduler",
Long: "The descheduler evicts pods which may be bound to less desired nodes",
PreRunE: func(cmd *cobra.Command, args []string) error {
logs.InitLogs()
if logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
return err
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
// s.Logs.Config.Format = s.Logging.Format

// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
var SecureServing *apiserver.SecureServingInfo
if err := s.SecureServing.ApplyTo(&SecureServing, &LoopbackClientConfig); err != nil {
klog.ErrorS(err, "failed to apply secure server configuration")
return
}
descheduler.SetupPlugins()
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if err = s.Apply(); err != nil {
klog.ErrorS(err, "failed to apply")
return err
var factory registry.LogFormatFactory

if s.Logging.Format == "json" {
factory = jsonLog.Factory{}
}

if err = Run(cmd.Context(), s); err != nil {
klog.ErrorS(err, "failed to run descheduler server")
return err
if factory == nil {
klog.ClearLogger()
} else {
log, logrFlush := factory.Create(registry.LoggingConfiguration{
Format: s.Logging.Format,
Verbosity: s.Logging.Verbosity,
})
defer logrFlush()
klog.SetLogger(log)
}

return nil
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)

pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}

healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
}

err = Run(ctx, s)
if err != nil {
klog.ErrorS(err, "descheduler server")
}

done()
// wait for metrics server to close
<-stoppedCh
},
}
cmd.SetOut(out)
flags := cmd.Flags()
s.AddFlags(flags)

runtime.Must(logsapi.AddFeatureGates(featureGate))
logsapi.AddFlags(logConfig, flags)

return cmd
}

func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)

pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !rs.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}

healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

stoppedCh, _, err := rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
if err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return err
}

err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
if err != nil {
klog.ErrorS(err, "failed to create tracer provider")
}
defer tracing.Shutdown(ctx)

// increase the fake watch channel so the dry-run mode can be run
// over a cluster with thousands of pods
watch.DefaultChanSize = 100000
err = descheduler.Run(ctx, rs)
if err != nil {
return err
}

done()
// wait for metrics server to close
<-stoppedCh

return nil
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return descheduler.Run(ctx, rs)
}

func SetupLogs() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}
@@ -21,8 +21,14 @@ import (

"k8s.io/component-base/cli"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
"sigs.k8s.io/descheduler/pkg/descheduler"
)

func init() {
app.SetupLogs()
descheduler.SetupPlugins()
}

func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)
@@ -1,70 +0,0 @@
|
||||
## descheduler
|
||||
|
||||
descheduler
|
||||
|
||||
### Synopsis
|
||||
|
||||
The descheduler evicts pods which may be bound to less desired nodes
|
||||
|
||||
```
|
||||
descheduler [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
|
||||
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
|
||||
--client-connection-burst int32 Burst to use for interacting with kubernetes apiserver.
|
||||
--client-connection-kubeconfig string File path to kube configuration for interacting with kubernetes apiserver.
|
||||
--client-connection-qps float32 QPS to use for interacting with kubernetes apiserver.
|
||||
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
|
||||
--disable-http2-serving If true, HTTP2 serving will be disabled [default=false]
|
||||
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
|
||||
--dry-run Execute descheduler in dry run mode.
|
||||
--enable-http2 If http/2 should be enabled for the metrics and health check
|
||||
--feature-gates mapStringBool A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
|
||||
AllAlpha=true|false (ALPHA - default=false)
|
||||
AllBeta=true|false (BETA - default=false)
|
||||
EvictionsInBackground=true|false (ALPHA - default=false)
|
||||
-h, --help help for descheduler
|
||||
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
|
||||
--kubeconfig string File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
|
||||
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
|
||||
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
|
||||
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
|
||||
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
|
||||
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
|
||||
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
|
||||
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
|
||||
--log-json-info-buffer-size quantity [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
|
||||
--log-json-split-stream [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
|
||||
--log-text-info-buffer-size quantity [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
|
||||
--log-text-split-stream [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
|
||||
--logging-format string Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
|
||||
--otel-collector-endpoint string Set this flag to the OpenTelemetry Collector Service Address
|
||||
--otel-fallback-no-op-on-error Fallback to NoOp Tracer in case of error
|
||||
--otel-sample-rate float Sample rate to collect the Traces (default 1)
|
||||
--otel-service-name string OTEL Trace name to be used with the resources (default "descheduler")
|
||||
--otel-trace-namespace string OTEL Trace namespace to be used with the resources
|
||||
--otel-transport-ca-cert string Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
|
||||
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
|
||||
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
|
||||
--policy-config-file string File with descheduler policy configuration.
|
||||
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
|
||||
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
|
||||
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
|
||||
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.
|
||||
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.
|
||||
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
|
||||
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
|
||||
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule pattern=N,... comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
|
||||
```
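As a rough illustration of how the tracing and TLS flags above combine, an invocation might look like the following. This is a sketch only: the collector address, certificate paths, and policy path are placeholders, not values taken from this repository.

```sh
# Hypothetical invocation (placeholder paths/addresses): enable OTLP tracing
# over gRPC and serve the secure endpoint with a provided certificate pair.
descheduler \
  --policy-config-file=/policy-dir/policy.yaml \
  --otel-collector-endpoint=otel-collector.observability.svc:4317 \
  --otel-service-name=descheduler \
  --otel-sample-rate=0.1 \
  --tls-cert-file=/certs/tls.crt \
  --tls-private-key-file=/certs/tls.key \
  --v=3
```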
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [descheduler version](descheduler_version.md) - Version of descheduler
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
## descheduler version
|
||||
|
||||
Version of descheduler
|
||||
|
||||
### Synopsis
|
||||
|
||||
Prints the version of descheduler.
|
||||
|
||||
```
|
||||
descheduler version [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for version
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [descheduler](descheduler.md) - descheduler
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
## Required Tools
|
||||
|
||||
- [Git](https://git-scm.com/downloads)
|
||||
- [Go 1.23+](https://golang.org/dl/)
|
||||
- [Go 1.16+](https://golang.org/dl/)
|
||||
- [Docker](https://docs.docker.com/install/)
|
||||
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
|
||||
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
|
||||
@@ -20,7 +20,7 @@ make
|
||||
|
||||
Run descheduler.
|
||||
```sh
|
||||
./_output/bin/descheduler --client-connection-kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
|
||||
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
|
||||
```
|
||||
|
||||
View all CLI options.
|
||||
|
||||
@@ -26,7 +26,7 @@ When the above pre-release steps are complete and the release is ready to be cut
|
||||
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
|
||||
4. Cut release branch from `master`, eg `release-1.24`
|
||||
5. Publish release using Github's release process from the git tag you created
|
||||
6. Email `sig-scheduling@kubernetes.io` to announce the release
|
||||
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
|
||||
|
||||
**Patch release**
|
||||
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
|
||||
@@ -34,7 +34,7 @@ When the above pre-release steps are complete and the release is ready to be cut
|
||||
3. Merge Helm chart version update to release branch
|
||||
4. Perform the image promotion process for the patch version
|
||||
5. Publish release using Github's release process from the git tag you created
|
||||
6. Email `sig-scheduling@kubernetes.io` to announce the release
|
||||
6. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
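For step 1 of the patch-release flow above, the repository's cherry-pick helper (its full text appears further down in this diff) automates picking merged PRs onto a release branch. A hedged sketch, assuming the script is saved as `hack/cherry_pick_pull.sh` and that `upstream` points at kubernetes-sigs/descheduler:

```sh
# Assumed script path; the PR number comes from the script's own usage examples.
# GITHUB_USER must name the account hosting your fork, and `gh` must be logged in.
export GITHUB_USER=<your-github-user>
./hack/cherry_pick_pull.sh upstream/release-1.24 12345

# Set DRY_RUN to stop before pushing the branch and opening the PR.
DRY_RUN=1 ./hack/cherry_pick_pull.sh upstream/release-1.24 12345
```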
|
||||
|
||||
### Flowchart
|
||||
|
||||
|
||||
@@ -4,12 +4,17 @@ Starting with descheduler release v0.10.0 container images are available in the
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-------------------------------------------------|-------------------------|
|
||||
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.25.1 | registry.k8s.io/descheduler/descheduler:v0.25.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.25.0 | registry.k8s.io/descheduler/descheduler:v0.25.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.24.1 | registry.k8s.io/descheduler/descheduler:v0.24.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.24.0 | registry.k8s.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.23.1 | registry.k8s.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.22.0 | registry.k8s.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.21.0 | registry.k8s.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.20.0 | registry.k8s.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
|
||||
v0.19.0 | registry.k8s.io/descheduler/descheduler:v0.19.0 | AMD64 |
|
||||
v0.18.0 | registry.k8s.io/descheduler/descheduler:v0.18.0 | AMD64 |
|
||||
v0.10.0 | registry.k8s.io/descheduler/descheduler:v0.10.0 | AMD64 |
|
||||
|
||||
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
|
||||
starting with descheduler release v0.20.0, use the process below to download the official descheduler
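That process boils down to pulling the image for your host architecture and side-loading it into the kind cluster, roughly as follows (a sketch; the `kind load` line matches the hunk header below):

```sh
# Pull the image locally, then copy it into the kind node image store so the
# descheduler pod can start without kind pulling the multi-arch manifest itself.
docker pull registry.k8s.io/descheduler/descheduler:v0.20.0
kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
```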
|
||||
@@ -24,7 +29,65 @@ kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
|
||||
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
|
||||
|
||||
## CLI Options
|
||||
The descheduler has many CLI options that can be used to override its default behavior. Please check the [CLI Options](./cli/descheduler.md) documentation for details.
|
||||
The descheduler has many CLI options that can be used to override its default behavior.
|
||||
```
|
||||
descheduler --help
|
||||
The descheduler evicts pods which may be bound to less desired nodes
|
||||
|
||||
Usage:
|
||||
descheduler [flags]
|
||||
descheduler [command]
|
||||
|
||||
Available Commands:
|
||||
completion generate the autocompletion script for the specified shell
|
||||
help Help about any command
|
||||
version Version of descheduler
|
||||
|
||||
Flags:
|
||||
--add-dir-header If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--alsologtostderr log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
|
||||
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
|
||||
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
|
||||
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
|
||||
--dry-run execute descheduler in dry run mode.
|
||||
-h, --help help for descheduler
|
||||
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
|
||||
--kubeconfig string File with kube configuration.
|
||||
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
|
||||
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
|
||||
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
|
||||
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
|
||||
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
|
||||
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
|
||||
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log_dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log_file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
|
||||
--logging-format string                     Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency. Non-default choices are currently alpha and subject to change without warning. (default "text")
|
||||
--logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
|
||||
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
|
||||
--policy-config-file string File with descheduler policy configuration.
|
||||
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
|
||||
--skip-headers If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--skip-log-headers If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
|
||||
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
|
||||
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
|
||||
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
|
||||
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
|
||||
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
|
||||
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
|
||||
Use "descheduler [command] --help" for more information about a command.
|
||||
```
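As a concrete illustration, a run that overrides a few of these defaults might look like the following (a sketch; the placeholder paths follow the convention used elsewhere in this document):

```sh
# Run in a continuous loop every 5 minutes, in dry-run mode, with verbose logs.
# Every flag used here appears in the help output above.
descheduler \
  --kubeconfig <path to kubeconfig> \
  --policy-config-file <path-to-policy-file> \
  --descheduling-interval 5m \
  --dry-run \
  --v 3
```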
|
||||
|
||||
## Production Use Cases
|
||||
This section contains descriptions of real world production use cases.
|
||||
@@ -46,23 +109,19 @@ descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.y
|
||||
This policy configuration file ensures that pods created more than 7 days ago are evicted.
|
||||
```
|
||||
---
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "PodLifeTime"
|
||||
args:
|
||||
maxPodLifeTimeSeconds: 604800
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "PodLifeTime"
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
|
||||
```
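Before enabling the policy, a plain kubectl query can show which pods are already past the 7-day mark (a sketch, independent of the descheduler itself):

```sh
# Oldest pods are listed first; anything created more than 7 days ago would be
# an eviction candidate under the PodLifeTime policy above.
kubectl get pods --all-namespaces --sort-by=.metadata.creationTimestamp
```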
|
||||
|
||||
### Balance Cluster By Node Memory Utilization
|
||||
If your cluster has been running for a long period of time, you may find that the resource utilization is not very
|
||||
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
|
||||
balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory`
|
||||
or `number of pods`.
|
||||
|
||||
#### Balance high utilization nodes
|
||||
@@ -70,21 +129,17 @@ Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memo
|
||||
from nodes with memory utilization over 70% to nodes with memory utilization below 20%.
|
||||
|
||||
```
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "LowNodeUtilization"
|
||||
args:
|
||||
strategies:
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
targetThresholds:
|
||||
"memory": 70
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
- "LowNodeUtilization"
|
||||
```
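To judge whether this is having the intended effect, per-node usage can be compared before and after enabling the plugin (a sketch; it assumes metrics-server or another metrics API implementation is installed in the cluster):

```sh
# Nodes above the 70% memory target are eviction sources; nodes below the 20%
# threshold are the underutilized nodes the evicted pods should land on.
kubectl top nodes
```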
|
||||
|
||||
#### Balance low utilization nodes
|
||||
@@ -93,19 +148,15 @@ from nodes with memory utilization lower than 20%. This should be use `NodeResou
|
||||
The evicted pods will be compacted into a minimal set of nodes.
|
||||
|
||||
```
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "HighNodeUtilization"
|
||||
args:
|
||||
strategies:
|
||||
"HighNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
- "HighNodeUtilization"
|
||||
```
|
||||
|
||||
### Autoheal Node Problems
|
||||
|
||||
@@ -1,18 +1,14 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "RemoveFailedPods"
|
||||
args:
|
||||
strategies:
|
||||
"RemoveFailedPods":
|
||||
enabled: true
|
||||
params:
|
||||
failedPods:
|
||||
reasons:
|
||||
- "OutOfcpu"
|
||||
- "CreateContainerConfigError"
|
||||
includingInitContainers: true
|
||||
excludeOwnerKinds:
|
||||
- "Job"
|
||||
minPodLifetimeSeconds: 3600
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemoveFailedPods"
|
||||
minPodLifetimeSeconds: 3600 # 1 hour
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "HighNodeUtilization"
|
||||
args:
|
||||
strategies:
|
||||
"HighNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
- "HighNodeUtilization"
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "LowNodeUtilization"
|
||||
args:
|
||||
strategies:
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"memory": 20
|
||||
targetThresholds:
|
||||
"memory": 70
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
- "LowNodeUtilization"
|
||||
|
||||
@@ -1,13 +1,8 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "RemovePodsViolatingNodeAffinity"
|
||||
args:
|
||||
nodeAffinityType:
|
||||
- "requiredDuringSchedulingIgnoredDuringExecution"
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemovePodsViolatingNodeAffinity"
|
||||
strategies:
|
||||
"RemovePodsViolatingNodeAffinity":
|
||||
enabled: true
|
||||
params:
|
||||
nodeAffinityType:
|
||||
- "requiredDuringSchedulingIgnoredDuringExecution"
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "PodLifeTime"
|
||||
args:
|
||||
strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 604800 # 7 days
|
||||
states:
|
||||
- "Pending"
|
||||
- "PodInitializing"
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "PodLifeTime"
|
||||
|
||||
@@ -1,36 +1,29 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "RemoveDuplicates"
|
||||
- name: "RemovePodsViolatingInterPodAntiAffinity"
|
||||
- name: "LowNodeUtilization"
|
||||
args:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
- name: "RemovePodsHavingTooManyRestarts"
|
||||
args:
|
||||
podRestartThreshold: 100
|
||||
includingInitContainers: true
|
||||
- name: "RemovePodsViolatingTopologySpreadConstraint"
|
||||
args:
|
||||
constraints:
|
||||
- DoNotSchedule
|
||||
- ScheduleAnyway
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemovePodsViolatingInterPodAntiAffinity"
|
||||
- "RemovePodsHavingTooManyRestarts"
|
||||
balance:
|
||||
enabled:
|
||||
- "RemoveDuplicates"
|
||||
- "LowNodeUtilization"
|
||||
- "RemovePodsViolatingTopologySpreadConstraint"
|
||||
strategies:
|
||||
"RemoveDuplicates":
|
||||
enabled: true
|
||||
"RemovePodsViolatingInterPodAntiAffinity":
|
||||
enabled: true
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
"RemovePodsHavingTooManyRestarts":
|
||||
enabled: true
|
||||
params:
|
||||
podsHavingTooManyRestarts:
|
||||
podRestartThreshold: 100
|
||||
includingInitContainers: true
|
||||
"RemovePodsViolatingTopologySpreadConstraint":
|
||||
enabled: true
|
||||
params:
|
||||
includeSoftConstraints: true
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "RemovePodsHavingTooManyRestarts"
|
||||
args:
|
||||
podRestartThreshold: 100
|
||||
includingInitContainers: true
|
||||
states:
|
||||
- CrashLoopBackOff
|
||||
plugins:
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemovePodsHavingTooManyRestarts"
|
||||
@@ -1,14 +1,8 @@
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "RemovePodsViolatingTopologySpreadConstraint"
|
||||
args:
|
||||
constraints:
|
||||
- DoNotSchedule
|
||||
- ScheduleAnyway
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
- "RemovePodsViolatingTopologySpreadConstraint"
|
||||
strategies:
|
||||
"RemovePodsViolatingTopologySpreadConstraint":
|
||||
enabled: true
|
||||
params:
|
||||
nodeFit: true
|
||||
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints
|
||||
|
||||
189
go.mod
189
go.mod
@@ -1,133 +1,110 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.24.2
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.62.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/spf13/cobra v1.6.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.opentelemetry.io/otel v1.33.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
|
||||
go.opentelemetry.io/otel/sdk v1.33.0
|
||||
go.opentelemetry.io/otel/trace v1.33.0
|
||||
google.golang.org/grpc v1.68.1
|
||||
k8s.io/api v0.33.0
|
||||
k8s.io/apimachinery v0.33.0
|
||||
k8s.io/apiserver v0.33.0
|
||||
k8s.io/client-go v0.33.0
|
||||
k8s.io/code-generator v0.33.0
|
||||
k8s.io/component-base v0.33.0
|
||||
k8s.io/component-helpers v0.33.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/metrics v0.33.0
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
k8s.io/api v0.26.0
|
||||
k8s.io/apimachinery v0.26.0
|
||||
k8s.io/apiserver v0.26.0
|
||||
k8s.io/client-go v0.26.0
|
||||
k8s.io/code-generator v0.26.0
|
||||
k8s.io/component-base v0.26.0
|
||||
k8s.io/component-helpers v0.26.0
|
||||
k8s.io/klog/v2 v2.80.1
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
|
||||
sigs.k8s.io/mdtoc v1.0.1
|
||||
)
|
||||
|
||||
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.19.1 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-logr/zapr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.19.14 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.2.4 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.23.2 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
|
||||
github.com/google/cel-go v0.12.5 // indirect
|
||||
github.com/google/gnostic v0.5.7-v3refs // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/google/uuid v1.1.2 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
|
||||
github.com/imdario/mergo v0.3.6 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/openshift/custom-resource-status v1.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.21 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.36.0 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/oauth2 v0.27.0 // indirect
|
||||
golang.org/x/sync v0.12.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
golang.org/x/term v0.30.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.2.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.5 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.5 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
|
||||
go.opentelemetry.io/otel v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.0 // indirect
|
||||
golang.org/x/crypto v0.1.0 // indirect
|
||||
golang.org/x/mod v0.6.0 // indirect
|
||||
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
||||
golang.org/x/tools v0.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
|
||||
google.golang.org/grpc v1.49.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
|
||||
k8s.io/kms v0.33.0 // indirect
|
||||
k8s.io/kube-openapi v0.30.0 // indirect
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
|
||||
k8s.io/kms v0.26.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
|
||||
|
||||
@@ -1,257 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md
|
||||
|
||||
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
|
||||
# meta.) Assumes you care about pulls from remote "upstream" and
|
||||
# checks them out to a branch named:
|
||||
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
REPO_ROOT="$(git rev-parse --show-toplevel)"
|
||||
declare -r REPO_ROOT
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
STARTINGBRANCH=$(git symbolic-ref --short HEAD)
|
||||
declare -r STARTINGBRANCH
|
||||
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
|
||||
DRY_RUN=${DRY_RUN:-""}
|
||||
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
|
||||
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
|
||||
FORK_REMOTE=${FORK_REMOTE:-origin}
|
||||
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
|
||||
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}
|
||||
|
||||
if [[ -z ${GITHUB_USER:-} ]]; then
|
||||
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v gh > /dev/null; then
|
||||
echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$#" -lt 2 ]]; then
|
||||
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
|
||||
echo
|
||||
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
|
||||
echo " Examples:"
|
||||
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
|
||||
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
|
||||
echo
|
||||
echo " Set the DRY_RUN environment var to skip git push and creating PR."
|
||||
echo " This is useful for creating patches to a release branch without making a PR."
|
||||
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
|
||||
echo
|
||||
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
|
||||
echo " This is useful when picking commits containing changes to API documentation."
|
||||
echo
|
||||
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
|
||||
echo " to override the default remote names to what you have locally."
|
||||
echo
|
||||
echo " For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Checks if you are logged in. Will error/bail if you are not.
|
||||
gh auth status
|
||||
|
||||
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
|
||||
echo "!!! Dirty tree. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -e "${REBASEMAGIC}" ]]; then
|
||||
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -r BRANCH="$1"
|
||||
shift 1
|
||||
declare -r PULLS=( "$@" )
|
||||
|
||||
function join { local IFS="$1"; shift; echo "$*"; }
|
||||
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
|
||||
declare -r PULLDASH
|
||||
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
|
||||
declare -r PULLSUBJ
|
||||
|
||||
echo "+++ Updating remotes..."
|
||||
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
|
||||
|
||||
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
|
||||
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
|
||||
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
|
||||
declare -r NEWBRANCHREQ
|
||||
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
|
||||
declare -r NEWBRANCH
|
||||
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
|
||||
declare -r NEWBRANCHUNIQ
|
||||
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
|
||||
|
||||
cleanbranch=""
|
||||
gitamcleanup=false
|
||||
function return_to_kansas {
|
||||
if [[ "${gitamcleanup}" == "true" ]]; then
|
||||
echo
|
||||
echo "+++ Aborting in-progress git am."
|
||||
git am --abort >/dev/null 2>&1 || true
|
||||
fi
|
||||
|
||||
# return to the starting branch and delete the PR text file
|
||||
if [[ -z "${DRY_RUN}" ]]; then
|
||||
echo
|
||||
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
|
||||
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
|
||||
if [[ -n "${cleanbranch}" ]]; then
|
||||
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
trap return_to_kansas EXIT
|
||||
|
||||
SUBJECTS=()
|
||||
function make-a-pr() {
|
||||
local rel
|
||||
rel="$(basename "${BRANCH}")"
|
||||
echo
|
||||
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
|
||||
|
||||
local numandtitle
|
||||
numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
|
||||
prtext=$(cat <<EOF
|
||||
Cherry pick of ${PULLSUBJ} on ${rel}.
|
||||
|
||||
${numandtitle}
|
||||
|
||||
For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.
|
||||
|
||||
\`\`\`release-note
|
||||
|
||||
\`\`\`
|
||||
EOF
|
||||
)
|
||||
|
||||
gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
|
||||
}
|
||||
|
||||
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
|
||||
cleanbranch="${NEWBRANCHUNIQ}"
|
||||
|
||||
gitamcleanup=true
|
||||
for pull in "${PULLS[@]}"; do
|
||||
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
|
||||
|
||||
curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
|
||||
echo
|
||||
echo "+++ About to attempt cherry pick of PR. To reattempt:"
|
||||
echo " $ git am -3 /tmp/${pull}.patch"
|
||||
echo
|
||||
git am -3 "/tmp/${pull}.patch" || {
|
||||
conflicts=false
|
||||
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|
||||
|| [[ -e "${REBASEMAGIC}" ]]; do
|
||||
conflicts=true # <-- We should have detected conflicts once
|
||||
echo
|
||||
echo "+++ Conflicts detected:"
|
||||
echo
|
||||
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
|
||||
echo
|
||||
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
|
||||
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
echo
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${conflicts}" != "true" ]]; then
|
||||
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# set the subject
|
||||
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
|
||||
SUBJECTS+=("#${pull}: ${subject}")
|
||||
|
||||
# remove the patch file from /tmp
|
||||
rm -f "/tmp/${pull}.patch"
|
||||
done
|
||||
gitamcleanup=false
|
||||
|
||||
# Re-generate docs (if needed)
|
||||
if [[ -n "${REGENERATE_DOCS}" ]]; then
|
||||
echo
|
||||
echo "Regenerating docs..."
|
||||
if ! hack/generate-docs.sh; then
|
||||
echo
|
||||
echo "hack/generate-docs.sh FAILED to complete."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${DRY_RUN}" ]]; then
|
||||
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
|
||||
echo "To return to the branch you were in when you invoked this script:"
|
||||
echo
|
||||
echo " git checkout ${STARTINGBRANCH}"
|
||||
echo
|
||||
echo "To delete this branch:"
|
||||
echo
|
||||
echo " git branch -D ${NEWBRANCHUNIQ}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
|
||||
echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
|
||||
echo "This isn't normal. Leaving you with push instructions:"
|
||||
echo
|
||||
echo "+++ First manually push the branch this script created:"
|
||||
echo
|
||||
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
|
||||
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
|
||||
echo
|
||||
make-a-pr
|
||||
cleanbranch=""
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
|
||||
echo
|
||||
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
make-a-pr
|
||||
@@ -1,20 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra/doc"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app"
|
||||
)
|
||||
|
||||
var docGenPath = "docs/cli"
|
||||
|
||||
func main() {
|
||||
cmd := app.NewDeschedulerCommand(os.Stdout)
|
||||
cmd.AddCommand(app.NewVersionCommand())
|
||||
cmd.DisableAutoGenTag = true // Disable this so that the diff wont track it
|
||||
if err := doc.GenMarkdownTree(cmd, docGenPath); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -20,6 +20,6 @@ function find_dirs_containing_comment_tags() {
|
||||
| LC_ALL=C sort -u \
|
||||
)
|
||||
|
||||
IFS=" ";
|
||||
IFS=",";
|
||||
printf '%s' "${array[*]}";
|
||||
}
|
||||
|
||||
@@ -6,5 +6,5 @@ go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/con
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--output-file zz_generated.conversion.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")
|
||||
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")" \
|
||||
--output-file-base zz_generated.conversion
|
||||
|
||||
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepc
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--output-file zz_generated.deepcopy.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")
|
||||
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
|
||||
|
||||
@@ -6,6 +6,6 @@ go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defa
|
||||
|
||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha2" \
|
||||
--output-file zz_generated.defaults.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")
|
||||
--input-dirs "$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.defaults
|
||||
|
||||
@@ -20,9 +20,13 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
|
||||
|
||||
go::verify_version
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2023 The Kubernetes Authors.
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -18,6 +18,8 @@ set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go run ${SCRIPT_DIR}/doc-gen
|
||||
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/mdtoc --inplace README.md
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2023 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
|
||||
temp_dir=$(mktemp -d)
|
||||
|
||||
go run -ldflags "-X main.docGenPath=${temp_dir}" ${SCRIPT_DIR}/doc-gen
|
||||
|
||||
if ! _out="$(diff -Naupr ${SCRIPT_DIR}/../docs/cli "${temp_dir}")"; then
|
||||
echo "Generated output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated conversions verify failed. Please run ./hack/update-docs.sh"
|
||||
rm -rf ${temp_dir}
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf ${temp_dir}
|
||||
@@ -20,9 +20,13 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
|
||||
|
||||
go::verify_version
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18|go1.19') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
|
||||
23
hack/lib/go.sh → hack/verify-toc.sh
Normal file → Executable file
23
hack/lib/go.sh → hack/verify-toc.sh
Normal file → Executable file
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2024 The Kubernetes Authors.
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -14,13 +14,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# go::verify_version verifies the go version is supported by the project.
|
||||
# descheduler actively supports 3 versions, therefore 3 go versions are supported.
|
||||
go::verify_version() {
|
||||
GO_VERSION=($(go version))
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/mdtoc" "sigs.k8s.io/mdtoc"
|
||||
|
||||
if ! ${OS_OUTPUT_BINPATH}/mdtoc --inplace --dryrun README.md
|
||||
then
|
||||
echo "ERROR: Changes detected to table of contents. Run ./hack/update-toc.sh" >&2
|
||||
exit 1
|
||||
fi
|
||||
@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
ret=1
|
||||
fi
|
||||
|
||||
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
|
||||
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
|
||||
echo "Your vendored results are different:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Vendor Verify failed." >&2
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,16 +0,0 @@
|
||||
title: descheduler integration with evacuation API as an alternative to eviction API
|
||||
kep-number: 1397
|
||||
authors:
|
||||
- "@ingvagabund"
|
||||
owning-sig: sig-scheduling
|
||||
participating-sigs:
|
||||
- sig-apps
|
||||
status: provisional
|
||||
creation-date: 2024-04-14
|
||||
reviewers:
|
||||
- atiratree
|
||||
approvers:
|
||||
- TBD
|
||||
feature-gates:
|
||||
- TBD
|
||||
stage: alpha
|
||||
File diff suppressed because it is too large
Binary file not shown.
|
Before Width: | Height: | Size: 48 KiB |
@@ -1,29 +0,0 @@
|
||||
title: Descheduling framework
|
||||
kep-number: 753
|
||||
authors:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
owning-sig: sig-scheduling
|
||||
status: provisional
|
||||
creation-date: 2024-04-08
|
||||
reviewers:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
- "@a7i"
|
||||
- "@knelasevero"
|
||||
approvers:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
- "@a7i"
|
||||
- "@knelasevero"
|
||||
#replaces:
|
||||
|
||||
# The target maturity stage in the current dev cycle for this KEP.
|
||||
stage: alpha
|
||||
|
||||
# The most recent milestone for which work toward delivery of this KEP has been
|
||||
# done. This can be the current (upcoming) milestone, if it is being actively
|
||||
# worked on.
|
||||
# latest-milestone: "v1.19"
|
||||
|
||||
# disable-supported: true
|
||||
@@ -6,29 +6,23 @@ metadata:
  namespace: kube-system
data:
  policy.yaml: |
    apiVersion: "descheduler/v1alpha2"
    apiVersion: "descheduler/v1alpha1"
    kind: "DeschedulerPolicy"
    profiles:
      - name: ProfileName
        pluginConfig:
        - name: "DefaultEvictor"
        - name: "RemovePodsViolatingInterPodAntiAffinity"
        - name: "RemoveDuplicates"
        - name: "LowNodeUtilization"
          args:
            thresholds:
              "cpu" : 20
              "memory": 20
              "pods": 20
            targetThresholds:
              "cpu" : 50
              "memory": 50
              "pods": 50
        plugins:
          balance:
            enabled:
              - "LowNodeUtilization"
              - "RemoveDuplicates"
          deschedule:
            enabled:
              - "RemovePodsViolatingInterPodAntiAffinity"
    strategies:
      "RemoveDuplicates":
        enabled: true
      "RemovePodsViolatingInterPodAntiAffinity":
        enabled: true
      "LowNodeUtilization":
        enabled: true
        params:
          nodeResourceUtilizationThresholds:
            thresholds:
              "cpu" : 20
              "memory": 20
              "pods": 20
            targetThresholds:
              "cpu" : 50
              "memory": 50
              "pods": 50
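The thresholds and targetThresholds above are percentages of node capacity. As a rough illustration of how the two threshold sets are interpreted (a minimal sketch under the usual LowNodeUtilization semantics, not the plugin's actual code; the types and helper below are hypothetical), a node below every `thresholds` entry counts as underutilized, while a node above any `targetThresholds` entry counts as overutilized:

package main

import "fmt"

// ResourceThresholds maps a resource name to a percentage of node capacity (hypothetical helper for illustration).
type ResourceThresholds map[string]float64

// classify mirrors the documented intent of thresholds/targetThresholds:
// below all thresholds => underutilized, above any targetThreshold => overutilized.
func classify(usage, thresholds, targetThresholds ResourceThresholds) string {
    under := true
    for res, limit := range thresholds {
        if usage[res] >= limit {
            under = false
            break
        }
    }
    if under {
        return "underutilized"
    }
    for res, limit := range targetThresholds {
        if usage[res] > limit {
            return "overutilized"
        }
    }
    return "appropriately utilized"
}

func main() {
    thresholds := ResourceThresholds{"cpu": 20, "memory": 20, "pods": 20}
    target := ResourceThresholds{"cpu": 50, "memory": 50, "pods": 50}
    fmt.Println(classify(ResourceThresholds{"cpu": 10, "memory": 15, "pods": 5}, thresholds, target))  // underutilized
    fmt.Println(classify(ResourceThresholds{"cpu": 70, "memory": 40, "pods": 30}, thresholds, target)) // overutilized
}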
@@ -22,28 +22,13 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create", "update"]
  verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["descheduler"]
  verbs: ["get", "patch", "delete"]
- apiGroups: ["metrics.k8s.io"]
  resources: ["nodes", "pods"]
  verbs: ["get", "list"]
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-role
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
@@ -63,16 +48,3 @@ subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: descheduler-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: descheduler-role
subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
@@ -16,7 +16,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.33.0
          image: registry.k8s.io/descheduler/descheduler:v0.25.1
          volumeMounts:
          - mountPath: /policy-dir
            name: policy-volume
@@ -19,7 +19,7 @@ spec:
      serviceAccountName: descheduler-sa
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.33.0
          image: registry.k8s.io/descheduler/descheduler:v0.25.1
          imagePullPolicy: IfNotPresent
          command:
            - "/bin/descheduler"
@@ -14,7 +14,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: descheduler
          image: registry.k8s.io/descheduler/descheduler:v0.33.0
          image: registry.k8s.io/descheduler/descheduler:v0.25.1
          volumeMounts:
          - mountPath: /policy-dir
            name: policy-volume
@@ -36,7 +36,7 @@ var (
            Name:           "pods_evicted",
            Help:           "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
            StabilityLevel: metrics.ALPHA,
        }, []string{"result", "strategy", "profile", "namespace", "node"})
        }, []string{"result", "strategy", "namespace", "node"})

    buildInfo = metrics.NewGauge(
        &metrics.GaugeOpts{
@@ -48,29 +48,9 @@ var (
        },
    )

    DeschedulerLoopDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem:      DeschedulerSubsystem,
            Name:           "descheduler_loop_duration_seconds",
            Help:           "Time taken to complete a full descheduling cycle",
            StabilityLevel: metrics.ALPHA,
            Buckets:        []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
        }, []string{})

    DeschedulerStrategyDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem:      DeschedulerSubsystem,
            Name:           "descheduler_strategy_duration_seconds",
            Help:           "Time taken to complete Each strategy of the descheduling operation",
            StabilityLevel: metrics.ALPHA,
            Buckets:        []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
        }, []string{"strategy", "profile"})

    metricsList = []metrics.Registerable{
        PodsEvicted,
        buildInfo,
        DeschedulerLoopDuration,
        DeschedulerStrategyDuration,
    }
)
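For context on how histogram vectors such as DeschedulerLoopDuration are typically fed, the sketch below registers a similar metric with component-base's legacy registry and records one observation per cycle. This is an assumption about the surrounding wiring, not code from this diff; the subsystem value and the cycle placeholder are made up.

package main

import (
    "time"

    "k8s.io/component-base/metrics"
    "k8s.io/component-base/metrics/legacyregistry"
)

// loopDuration mirrors the DeschedulerLoopDuration definition in the hunk above,
// trimmed to the fields needed for the example; the subsystem string is assumed.
var loopDuration = metrics.NewHistogramVec(
    &metrics.HistogramOpts{
        Subsystem: "descheduler",
        Name:      "descheduler_loop_duration_seconds",
        Help:      "Time taken to complete a full descheduling cycle",
        Buckets:   []float64{0.01, 0.1, 1, 10, 100},
    }, []string{})

func main() {
    // Register once; the real code keeps a metricsList and registers everything together.
    legacyregistry.MustRegister(loopDuration)

    start := time.Now()
    // ... run one descheduling cycle here ...
    loopDuration.WithLabelValues().Observe(time.Since(start).Seconds())
}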
@@ -1,17 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api
@@ -15,7 +15,7 @@ package api

import "sort"

func SortDeschedulerProfileByName(profiles []DeschedulerProfile) []DeschedulerProfile {
func SortProfilesByName(profiles []Profile) []Profile {
    sort.Slice(profiles, func(i, j int) bool {
        return profiles[i].Name < profiles[j].Name
    })
@@ -18,7 +18,6 @@ package api

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
)
@@ -29,7 +28,7 @@ type DeschedulerPolicy struct {
    metav1.TypeMeta

    // Profiles
    Profiles []DeschedulerProfile
    Profiles []Profile

    // NodeSelector for a set of nodes to operate over
    NodeSelector *string
@@ -39,39 +38,13 @@ type DeschedulerPolicy struct {

    // MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
    MaxNoOfPodsToEvictPerNamespace *uint

    // MaxNoOfPodsToTotal restricts maximum of pods to be evicted total.
    MaxNoOfPodsToEvictTotal *uint

    // EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
    // Default is false.
    EvictionFailureEventNotification *bool

    // MetricsCollector configures collection of metrics about actual resource utilization
    // Deprecated. Use MetricsProviders field instead.
    MetricsCollector *MetricsCollector

    // MetricsProviders configure collection of metrics about actual resource utilization from various sources
    MetricsProviders []MetricsProvider

    // GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
    // The value zero indicates delete immediately. If this value is nil, the default grace period for the
    // specified type will be used.
    // Defaults to a per object value if not specified. zero means delete immediately.
    GracePeriodSeconds *int64
}

// Namespaces carries a list of included/excluded namespaces
// for which a given strategy is applicable
type Namespaces struct {
    Include []string `json:"include,omitempty"`
    Exclude []string `json:"exclude,omitempty"`
}

// EvictionLimits limits the number of evictions per domain. E.g. node, namespace, total.
type EvictionLimits struct {
    // node restricts the maximum number of evictions per node
    Node *uint `json:"node,omitempty"`
    Include []string
    Exclude []string
}

type (
@@ -80,11 +53,11 @@ type (
)

type PriorityThreshold struct {
    Value *int32 `json:"value"`
    Name  string `json:"name"`
    Value *int32
    Name  string
}

type DeschedulerProfile struct {
type Profile struct {
    Name          string
    PluginConfigs []PluginConfig
    Plugins       Plugins
@@ -100,6 +73,7 @@ type Plugins struct {
    Sort              PluginSet
    Deschedule        PluginSet
    Balance           PluginSet
    Evict             PluginSet
    Filter            PluginSet
    PreEvictionFilter PluginSet
}
@@ -108,55 +82,3 @@ type PluginSet struct {
    Enabled  []string
    Disabled []string
}

type MetricsSource string

const (
    // KubernetesMetrics enables metrics from a Kubernetes metrics server.
    // Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
    KubernetesMetrics MetricsSource = "KubernetesMetrics"

    // KubernetesMetrics enables metrics from a Prometheus metrics server.
    PrometheusMetrics MetricsSource = "Prometheus"
)

// MetricsCollector configures collection of metrics about actual resource utilization
type MetricsCollector struct {
    // Enabled metrics collection from Kubernetes metrics.
    // Deprecated. Use MetricsProvider.Source field instead.
    Enabled bool
}

// MetricsProvider configures collection of metrics about actual resource utilization from a given source
type MetricsProvider struct {
    // Source enables metrics from Kubernetes metrics server.
    Source MetricsSource

    // Prometheus enables metrics collection through Prometheus
    Prometheus *Prometheus
}

// ReferencedResourceList is an adaption of v1.ResourceList with resources as references
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

type Prometheus struct {
    URL string
    // authToken used for authentication with the prometheus server.
    // If not set the in cluster authentication token for the descheduler service
    // account is read from the container's file system.
    AuthToken *AuthToken
}

type AuthToken struct {
    // secretReference references an authentication token.
    // secrets are expected to be created under the descheduler's namespace.
    SecretReference *SecretReference
}

// SecretReference holds a reference to a Secret
type SecretReference struct {
    // namespace is the namespace of the secret.
    Namespace string
    // name is the name of the secret.
    Name string
}
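To make the richer side of this hunk concrete, the hedged sketch below populates an internal DeschedulerPolicy using the metrics-provider fields shown above. Field and constant names follow the type definitions in the diff; all values (URL, secret name, limit) are illustrative.

package main

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/api"
)

func main() {
    limit := uint(10)
    // Illustrative policy built against the internal types above; the values are arbitrary.
    policy := api.DeschedulerPolicy{
        MaxNoOfPodsToEvictTotal: &limit,
        MetricsProviders: []api.MetricsProvider{
            {
                Source: api.PrometheusMetrics,
                Prometheus: &api.Prometheus{
                    URL: "http://prometheus.example:9090",
                    AuthToken: &api.AuthToken{
                        SecretReference: &api.SecretReference{Namespace: "kube-system", Name: "prom-token"},
                    },
                },
            },
        },
    }
    fmt.Println(len(policy.MetricsProviders))
}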
@@ -1,5 +1,5 @@
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2
package v1alpha1

import "k8s.io/apimachinery/pkg/runtime"

@@ -1,5 +1,5 @@
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,10 +15,9 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=sigs.k8s.io/descheduler/pkg/api
// +k8s:defaulter-gen=TypeMeta

// Package v1alpha2 is the v1alpha2 version of the descheduler API
// Package v1alpha1 is the v1alpha1 version of the descheduler API
// +groupName=descheduler

package v1alpha2 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
package v1alpha1 // import "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
@@ -1,5 +1,5 @@
/*
Copyright 2023 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2
package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime"
@@ -30,7 +30,7 @@ var (
// GroupName is the group name used in this package
const (
    GroupName    = "descheduler"
    GroupVersion = "v1alpha2"
    GroupVersion = "v1alpha1"
)

// SchemeGroupVersion is group version used to register these objects
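The hunk above cuts off right before the SchemeGroupVersion definition. In Kubernetes API groups this usually follows the standard pattern below; this is a sketch of the conventional wiring, not lines taken from this diff.

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
)

const (
    GroupName    = "descheduler"
    GroupVersion = "v1alpha1"
)

// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion}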
129 pkg/api/v1alpha1/types.go Normal file
@@ -0,0 +1,129 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type DeschedulerPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Strategies
|
||||
Strategies StrategyList `json:"strategies,omitempty"`
|
||||
|
||||
// NodeSelector for a set of nodes to operate over
|
||||
NodeSelector *string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
|
||||
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
|
||||
|
||||
// EvictLocalStoragePods allows pods using local storage to be evicted.
|
||||
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
|
||||
|
||||
// EvictSystemCriticalPods allows eviction of pods of any priority (including Kubernetes system pods)
|
||||
EvictSystemCriticalPods *bool `json:"evictSystemCriticalPods,omitempty"`
|
||||
|
||||
// IgnorePVCPods prevents pods with PVCs from being evicted.
|
||||
IgnorePVCPods *bool `json:"ignorePvcPods,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
|
||||
MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
|
||||
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
|
||||
}
|
||||
|
||||
type (
|
||||
StrategyName string
|
||||
StrategyList map[StrategyName]DeschedulerStrategy
|
||||
)
|
||||
|
||||
type DeschedulerStrategy struct {
|
||||
// Enabled or disabled
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
|
||||
// Weight
|
||||
Weight int `json:"weight,omitempty"`
|
||||
|
||||
// Strategy parameters
|
||||
Params *StrategyParameters `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// Namespaces carries a list of included/excluded namespaces
|
||||
// for which a given strategy is applicable.
|
||||
type Namespaces struct {
|
||||
Include []string `json:"include"`
|
||||
Exclude []string `json:"exclude"`
|
||||
}
|
||||
|
||||
// Besides Namespaces ThresholdPriority and ThresholdPriorityClassName only one of its members may be specified
|
||||
type StrategyParameters struct {
|
||||
NodeResourceUtilizationThresholds *NodeResourceUtilizationThresholds `json:"nodeResourceUtilizationThresholds,omitempty"`
|
||||
NodeAffinityType []string `json:"nodeAffinityType,omitempty"`
|
||||
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
|
||||
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
|
||||
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
|
||||
FailedPods *FailedPods `json:"failedPods,omitempty"`
|
||||
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
|
||||
Namespaces *Namespaces `json:"namespaces"`
|
||||
ThresholdPriority *int32 `json:"thresholdPriority"`
|
||||
ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
|
||||
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
|
||||
NodeFit bool `json:"nodeFit"`
|
||||
IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
|
||||
ExcludedTaints []string `json:"excludedTaints,omitempty"`
|
||||
}
|
||||
|
||||
type (
|
||||
Percentage float64
|
||||
ResourceThresholds map[v1.ResourceName]Percentage
|
||||
)
|
||||
|
||||
type NodeResourceUtilizationThresholds struct {
|
||||
UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
|
||||
Thresholds ResourceThresholds `json:"thresholds,omitempty"`
|
||||
TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
|
||||
NumberOfNodes int `json:"numberOfNodes,omitempty"`
|
||||
}
|
||||
|
||||
type PodsHavingTooManyRestarts struct {
|
||||
PodRestartThreshold int32 `json:"podRestartThreshold,omitempty"`
|
||||
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
|
||||
}
|
||||
|
||||
type RemoveDuplicates struct {
|
||||
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
|
||||
}
|
||||
|
||||
type PodLifeTime struct {
|
||||
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
|
||||
States []string `json:"states,omitempty"`
|
||||
|
||||
// Deprecated: Use States instead.
|
||||
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
|
||||
}
|
||||
|
||||
type FailedPods struct {
|
||||
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
|
||||
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
|
||||
Reasons []string `json:"reasons,omitempty"`
|
||||
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
|
||||
}
|
||||
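As a quick illustration of how the v1alpha1 types added above fit together, the hedged sketch below builds the same LowNodeUtilization configuration as the ConfigMap earlier in this diff, but programmatically; the threshold values are the illustrative ones from that example.

package main

import (
    "fmt"

    "sigs.k8s.io/descheduler/pkg/api/v1alpha1"
)

func main() {
    // Strategy map keyed by name, mirroring the strategies: block of the v1alpha1 policy above.
    policy := v1alpha1.DeschedulerPolicy{
        Strategies: v1alpha1.StrategyList{
            "LowNodeUtilization": v1alpha1.DeschedulerStrategy{
                Enabled: true,
                Params: &v1alpha1.StrategyParameters{
                    NodeResourceUtilizationThresholds: &v1alpha1.NodeResourceUtilizationThresholds{
                        Thresholds:       v1alpha1.ResourceThresholds{"cpu": 20, "memory": 20, "pods": 20},
                        TargetThresholds: v1alpha1.ResourceThresholds{"cpu": 50, "memory": 50, "pods": 50},
                    },
                },
            },
        },
    }
    fmt.Println(len(policy.Strategies))
}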
380 pkg/api/v1alpha1/zz_generated.deepcopy.go Normal file
@@ -0,0 +1,380 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Strategies != nil {
|
||||
in, out := &in.Strategies, &out.Strategies
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictFailedBarePods != nil {
|
||||
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictLocalStoragePods != nil {
|
||||
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.EvictSystemCriticalPods != nil {
|
||||
in, out := &in.EvictSystemCriticalPods, &out.EvictSystemCriticalPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.IgnorePVCPods != nil {
|
||||
in, out := &in.IgnorePVCPods, &out.IgnorePVCPods
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNode != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.MaxNoOfPodsToEvictPerNamespace != nil {
|
||||
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
|
||||
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeschedulerStrategy) DeepCopyInto(out *DeschedulerStrategy) {
|
||||
*out = *in
|
||||
if in.Params != nil {
|
||||
in, out := &in.Params, &out.Params
|
||||
*out = new(StrategyParameters)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerStrategy.
|
||||
func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DeschedulerStrategy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MinPodLifetimeSeconds != nil {
|
||||
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.Reasons != nil {
|
||||
in, out := &in.Reasons, &out.Reasons
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
|
||||
func (in *FailedPods) DeepCopy() *FailedPods {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FailedPods)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
|
||||
*out = *in
|
||||
if in.Include != nil {
|
||||
in, out := &in.Include, &out.Include
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Exclude != nil {
|
||||
in, out := &in.Exclude, &out.Exclude
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespaces.
|
||||
func (in *Namespaces) DeepCopy() *Namespaces {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Namespaces)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopyInto(out *NodeResourceUtilizationThresholds) {
|
||||
*out = *in
|
||||
if in.Thresholds != nil {
|
||||
in, out := &in.Thresholds, &out.Thresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.TargetThresholds != nil {
|
||||
in, out := &in.TargetThresholds, &out.TargetThresholds
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceUtilizationThresholds.
|
||||
func (in *NodeResourceUtilizationThresholds) DeepCopy() *NodeResourceUtilizationThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NodeResourceUtilizationThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodLifeTime) DeepCopyInto(out *PodLifeTime) {
|
||||
*out = *in
|
||||
if in.MaxPodLifeTimeSeconds != nil {
|
||||
in, out := &in.MaxPodLifeTimeSeconds, &out.MaxPodLifeTimeSeconds
|
||||
*out = new(uint)
|
||||
**out = **in
|
||||
}
|
||||
if in.States != nil {
|
||||
in, out := &in.States, &out.States
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodStatusPhases != nil {
|
||||
in, out := &in.PodStatusPhases, &out.PodStatusPhases
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLifeTime.
|
||||
func (in *PodLifeTime) DeepCopy() *PodLifeTime {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodLifeTime)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopyInto(out *PodsHavingTooManyRestarts) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsHavingTooManyRestarts.
|
||||
func (in *PodsHavingTooManyRestarts) DeepCopy() *PodsHavingTooManyRestarts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PodsHavingTooManyRestarts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RemoveDuplicates) DeepCopyInto(out *RemoveDuplicates) {
|
||||
*out = *in
|
||||
if in.ExcludeOwnerKinds != nil {
|
||||
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveDuplicates.
|
||||
func (in *RemoveDuplicates) DeepCopy() *RemoveDuplicates {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RemoveDuplicates)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in ResourceThresholds) DeepCopyInto(out *ResourceThresholds) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(ResourceThresholds, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceThresholds.
|
||||
func (in ResourceThresholds) DeepCopy() ResourceThresholds {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ResourceThresholds)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in StrategyList) DeepCopyInto(out *StrategyList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(StrategyList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = *val.DeepCopy()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyList.
|
||||
func (in StrategyList) DeepCopy() StrategyList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
|
||||
*out = *in
|
||||
if in.NodeResourceUtilizationThresholds != nil {
|
||||
in, out := &in.NodeResourceUtilizationThresholds, &out.NodeResourceUtilizationThresholds
|
||||
*out = new(NodeResourceUtilizationThresholds)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeAffinityType != nil {
|
||||
in, out := &in.NodeAffinityType, &out.NodeAffinityType
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.PodsHavingTooManyRestarts != nil {
|
||||
in, out := &in.PodsHavingTooManyRestarts, &out.PodsHavingTooManyRestarts
|
||||
*out = new(PodsHavingTooManyRestarts)
|
||||
**out = **in
|
||||
}
|
||||
if in.PodLifeTime != nil {
|
||||
in, out := &in.PodLifeTime, &out.PodLifeTime
|
||||
*out = new(PodLifeTime)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.RemoveDuplicates != nil {
|
||||
in, out := &in.RemoveDuplicates, &out.RemoveDuplicates
|
||||
*out = new(RemoveDuplicates)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.FailedPods != nil {
|
||||
in, out := &in.FailedPods, &out.FailedPods
|
||||
*out = new(FailedPods)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = new(Namespaces)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ThresholdPriority != nil {
|
||||
in, out := &in.ThresholdPriority, &out.ThresholdPriority
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
*out = new(v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ExcludedTaints != nil {
|
||||
in, out := &in.ExcludedTaints, &out.ExcludedTaints
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyParameters.
|
||||
func (in *StrategyParameters) DeepCopy() *StrategyParameters {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StrategyParameters)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2021 The KubeVirt Authors.
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/serializer"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
)
|
||||
|
||||
var (
|
||||
// pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
|
||||
// used for defaulting/converting typed PluginConfig Args.
|
||||
// Access via getPluginArgConversionScheme()
|
||||
pluginArgConversionScheme *runtime.Scheme
|
||||
initPluginArgConversionScheme sync.Once
|
||||
|
||||
Scheme = runtime.NewScheme()
|
||||
Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
|
||||
)
|
||||
|
||||
func GetPluginArgConversionScheme() *runtime.Scheme {
|
||||
initPluginArgConversionScheme.Do(func() {
|
||||
// set up the scheme used for plugin arg conversion
|
||||
pluginArgConversionScheme = runtime.NewScheme()
|
||||
utilruntime.Must(AddToScheme(pluginArgConversionScheme))
|
||||
utilruntime.Must(api.AddToScheme(pluginArgConversionScheme))
|
||||
})
|
||||
return pluginArgConversionScheme
|
||||
}
|
||||
|
||||
func Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
if err := autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return convertToInternalPluginConfigArgs(out)
|
||||
}
|
||||
|
||||
// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal
|
||||
// types using a scheme, after applying defaults.
|
||||
func convertToInternalPluginConfigArgs(out *api.DeschedulerPolicy) error {
|
||||
scheme := GetPluginArgConversionScheme()
|
||||
for i := range out.Profiles {
|
||||
prof := &out.Profiles[i]
|
||||
for j := range prof.PluginConfigs {
|
||||
args := prof.PluginConfigs[j].Args
|
||||
if args == nil {
|
||||
continue
|
||||
}
|
||||
if _, isUnknown := args.(*runtime.Unknown); isUnknown {
|
||||
continue
|
||||
}
|
||||
internalArgs, err := scheme.ConvertToVersion(args, api.SchemeGroupVersion)
|
||||
if err != nil {
|
||||
err = nil
|
||||
internalArgs = args
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting .Profiles[%d].PluginConfigs[%d].Args into internal type: %w", i, j, err)
|
||||
}
|
||||
}
|
||||
prof.PluginConfigs[j].Args = internalArgs
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
|
||||
out.Name = in.Name
|
||||
if _, ok := pluginregistry.PluginRegistry[in.Name]; ok {
|
||||
out.Args = pluginregistry.PluginRegistry[in.Name].PluginArgInstance.DeepCopyObject()
|
||||
if in.Args.Raw != nil {
|
||||
_, _, err := Codecs.UniversalDecoder().Decode(in.Args.Raw, nil, out.Args)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if in.Args.Object != nil {
|
||||
out.Args = in.Args.Object
|
||||
}
|
||||
} else {
|
||||
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
|
||||
if err := autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in, out, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return convertToExternalPluginConfigArgs(out)
|
||||
}
|
||||
|
||||
// convertToExternalPluginConfigArgs converts PluginConfig#Args into
|
||||
// external (versioned) types using a scheme.
|
||||
func convertToExternalPluginConfigArgs(out *DeschedulerPolicy) error {
|
||||
scheme := GetPluginArgConversionScheme()
|
||||
for i := range out.Profiles {
|
||||
for j := range out.Profiles[i].PluginConfigs {
|
||||
args := out.Profiles[i].PluginConfigs[j].Args
|
||||
if args.Object == nil {
|
||||
continue
|
||||
}
|
||||
if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown {
|
||||
continue
|
||||
}
|
||||
externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out.Profiles[i].PluginConfigs[j].Args.Object = externalArgs
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
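The deleted v1alpha2 conversion file above is what turns a versioned policy read from disk into the internal api.DeschedulerPolicy. A minimal hedged sketch of that entry point follows; it builds its own scheme the way GetPluginArgConversionScheme does, and the file path simply reuses the /policy-dir mount from the deployment manifests earlier in this diff.

package main

import (
    "fmt"
    "os"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "sigs.k8s.io/descheduler/pkg/api"
    "sigs.k8s.io/descheduler/pkg/api/v1alpha2"
)

func main() {
    // Register both the internal and the v1alpha2 types, mirroring the scheme setup above.
    scheme := runtime.NewScheme()
    utilruntime.Must(api.AddToScheme(scheme))
    utilruntime.Must(v1alpha2.AddToScheme(scheme))
    codecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict)

    data, err := os.ReadFile("/policy-dir/policy.yaml")
    utilruntime.Must(err)

    // Decoding to the internal version runs the registered conversion functions.
    obj, err := runtime.Decode(codecs.UniversalDecoder(), data)
    utilruntime.Must(err)

    policy, ok := obj.(*api.DeschedulerPolicy)
    if !ok {
        panic(fmt.Sprintf("unexpected type %T", obj))
    }
    fmt.Println(len(policy.Profiles))
}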
@@ -1,26 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha2

import "sort"

func SortDeschedulerProfileByName(profiles []DeschedulerProfile) []DeschedulerProfile {
    sort.Slice(profiles, func(i, j int) bool {
        return profiles[i].Name < profiles[j].Name
    })
    return profiles
}
@@ -1,134 +0,0 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha2
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
type DeschedulerPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// Profiles
|
||||
Profiles []DeschedulerProfile `json:"profiles,omitempty"`
|
||||
|
||||
// NodeSelector for a set of nodes to operate over
|
||||
NodeSelector *string `json:"nodeSelector,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
|
||||
MaxNoOfPodsToEvictPerNode *uint `json:"maxNoOfPodsToEvictPerNode,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
|
||||
MaxNoOfPodsToEvictPerNamespace *uint `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
|
||||
|
||||
// MaxNoOfPodsToTotal restricts maximum of pods to be evicted total.
|
||||
MaxNoOfPodsToEvictTotal *uint `json:"maxNoOfPodsToEvictTotal,omitempty"`
|
||||
|
||||
// EvictionFailureEventNotification should be set to true to enable eviction failure event notification.
|
||||
// Default is false.
|
||||
EvictionFailureEventNotification *bool `json:"evictionFailureEventNotification,omitempty"`
|
||||
|
||||
// MetricsCollector configures collection of metrics for actual resource utilization
|
||||
// Deprecated. Use MetricsProviders field instead.
|
||||
MetricsCollector *MetricsCollector `json:"metricsCollector,omitempty"`
|
||||
|
||||
// MetricsProviders configure collection of metrics about actual resource utilization from various sources
|
||||
MetricsProviders []MetricsProvider `json:"metricsProviders,omitempty"`
|
||||
|
||||
// GracePeriodSeconds The duration in seconds before the object should be deleted. Value must be non-negative integer.
|
||||
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
|
||||
// specified type will be used.
|
||||
// Defaults to a per object value if not specified. zero means delete immediately.
|
||||
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
|
||||
}
|
||||
|
||||
type DeschedulerProfile struct {
|
||||
Name string `json:"name"`
|
||||
PluginConfigs []PluginConfig `json:"pluginConfig"`
|
||||
Plugins Plugins `json:"plugins"`
|
||||
}
|
||||
|
||||
type Plugins struct {
|
||||
PreSort PluginSet `json:"presort"`
|
||||
Sort PluginSet `json:"sort"`
|
||||
Deschedule PluginSet `json:"deschedule"`
|
||||
Balance PluginSet `json:"balance"`
|
||||
Filter PluginSet `json:"filter"`
|
||||
PreEvictionFilter PluginSet `json:"preevictionfilter"`
|
||||
}
|
||||
|
||||
type PluginConfig struct {
|
||||
Name string `json:"name"`
|
||||
Args runtime.RawExtension `json:"args"`
|
||||
}
|
||||
|
||||
type PluginSet struct {
|
||||
Enabled []string `json:"enabled"`
|
||||
Disabled []string `json:"disabled"`
|
||||
}
|
||||
|
||||
type MetricsSource string
|
||||
|
||||
const (
|
||||
// KubernetesMetrics enables metrics from a Kubernetes metrics server.
|
||||
// Please see https://kubernetes-sigs.github.io/metrics-server/ for more.
|
||||
KubernetesMetrics MetricsSource = "KubernetesMetrics"
|
||||
|
||||
// KubernetesMetrics enables metrics from a Prometheus metrics server.
|
||||
PrometheusMetrics MetricsSource = "Prometheus"
|
||||
)
|
||||
|
||||
// MetricsCollector configures collection of metrics about actual resource utilization
|
||||
type MetricsCollector struct {
|
||||
// Enabled metrics collection from Kubernetes metrics server.
|
||||
// Deprecated. Use MetricsProvider.Source field instead.
|
||||
Enabled bool `json:"enabled,omitempty"`
|
||||
}
|
||||
|
||||
// MetricsProvider configures collection of metrics about actual resource utilization from a given source
|
||||
type MetricsProvider struct {
|
||||
// Source enables metrics from Kubernetes metrics server.
|
||||
Source MetricsSource `json:"source,omitempty"`
|
||||
|
||||
// Prometheus enables metrics collection through Prometheus
|
||||
Prometheus *Prometheus `json:"prometheus,omitempty"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
// authToken used for authentication with the prometheus server.
|
||||
// If not set the in cluster authentication token for the descheduler service
|
||||
// account is read from the container's file system.
|
||||
AuthToken *AuthToken `json:"authToken,omitempty"`
|
||||
}
|
||||
|
||||
type AuthToken struct {
|
||||
// secretReference references an authentication token.
|
||||
// secrets are expected to be created under the descheduler's namespace.
|
||||
SecretReference *SecretReference `json:"secretReference,omitempty"`
|
||||
}
|
||||
|
||||
// SecretReference holds a reference to a Secret
|
||||
type SecretReference struct {
|
||||
// namespace is the namespace of the secret.
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
// name is the name of the secret.
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
@@ -1,437 +0,0 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by conversion-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha2
|
||||
|
||||
import (
|
||||
unsafe "unsafe"
|
||||
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
api "sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
func init() {
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(s *runtime.Scheme) error {
|
||||
if err := s.AddGeneratedConversionFunc((*AuthToken)(nil), (*api.AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_AuthToken_To_api_AuthToken(a.(*AuthToken), b.(*api.AuthToken), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.AuthToken)(nil), (*AuthToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_AuthToken_To_v1alpha2_AuthToken(a.(*api.AuthToken), b.(*AuthToken), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*DeschedulerProfile)(nil), (*api.DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(a.(*DeschedulerProfile), b.(*api.DeschedulerProfile), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.DeschedulerProfile)(nil), (*DeschedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(a.(*api.DeschedulerProfile), b.(*DeschedulerProfile), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*MetricsCollector)(nil), (*api.MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(a.(*MetricsCollector), b.(*api.MetricsCollector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.MetricsCollector)(nil), (*MetricsCollector)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(a.(*api.MetricsCollector), b.(*MetricsCollector), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*MetricsProvider)(nil), (*api.MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(a.(*MetricsProvider), b.(*api.MetricsProvider), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.MetricsProvider)(nil), (*MetricsProvider)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(a.(*api.MetricsProvider), b.(*MetricsProvider), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.PluginConfig)(nil), (*PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_PluginConfig_To_v1alpha2_PluginConfig(a.(*api.PluginConfig), b.(*PluginConfig), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*PluginSet)(nil), (*api.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_PluginSet_To_api_PluginSet(a.(*PluginSet), b.(*api.PluginSet), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.PluginSet)(nil), (*PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_PluginSet_To_v1alpha2_PluginSet(a.(*api.PluginSet), b.(*PluginSet), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*Plugins)(nil), (*api.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_Plugins_To_api_Plugins(a.(*Plugins), b.(*api.Plugins), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.Plugins)(nil), (*Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_Plugins_To_v1alpha2_Plugins(a.(*api.Plugins), b.(*Plugins), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*Prometheus)(nil), (*api.Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_Prometheus_To_api_Prometheus(a.(*Prometheus), b.(*api.Prometheus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.Prometheus)(nil), (*Prometheus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_Prometheus_To_v1alpha2_Prometheus(a.(*api.Prometheus), b.(*Prometheus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*SecretReference)(nil), (*api.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_SecretReference_To_api_SecretReference(a.(*SecretReference), b.(*api.SecretReference), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*api.SecretReference)(nil), (*SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_SecretReference_To_v1alpha2_SecretReference(a.(*api.SecretReference), b.(*SecretReference), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*api.DeschedulerPolicy)(nil), (*DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(a.(*api.DeschedulerPolicy), b.(*DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*DeschedulerPolicy)(nil), (*api.DeschedulerPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(a.(*DeschedulerPolicy), b.(*api.DeschedulerPolicy), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddConversionFunc((*PluginConfig)(nil), (*api.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha2_PluginConfig_To_api_PluginConfig(a.(*PluginConfig), b.(*api.PluginConfig), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
|
||||
out.SecretReference = (*api.SecretReference)(unsafe.Pointer(in.SecretReference))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha2_AuthToken_To_api_AuthToken is an autogenerated conversion function.
|
||||
func Convert_v1alpha2_AuthToken_To_api_AuthToken(in *AuthToken, out *api.AuthToken, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha2_AuthToken_To_api_AuthToken(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
|
||||
out.SecretReference = (*SecretReference)(unsafe.Pointer(in.SecretReference))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_api_AuthToken_To_v1alpha2_AuthToken is an autogenerated conversion function.
|
||||
func Convert_api_AuthToken_To_v1alpha2_AuthToken(in *api.AuthToken, out *AuthToken, s conversion.Scope) error {
|
||||
return autoConvert_api_AuthToken_To_v1alpha2_AuthToken(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha2_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
|
||||
if in.Profiles != nil {
|
||||
in, out := &in.Profiles, &out.Profiles
|
||||
*out = make([]api.DeschedulerProfile, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.Profiles = nil
|
||||
}
|
||||
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
|
||||
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
|
||||
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
out.MetricsCollector = (*api.MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
out.MetricsProviders = *(*[]api.MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
return nil
}

func autoConvert_api_DeschedulerPolicy_To_v1alpha2_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
if err := Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Profiles = nil
}
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.MaxNoOfPodsToEvictPerNode = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
out.MaxNoOfPodsToEvictPerNamespace = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNamespace))
out.MaxNoOfPodsToEvictTotal = (*uint)(unsafe.Pointer(in.MaxNoOfPodsToEvictTotal))
out.EvictionFailureEventNotification = (*bool)(unsafe.Pointer(in.EvictionFailureEventNotification))
out.MetricsCollector = (*MetricsCollector)(unsafe.Pointer(in.MetricsCollector))
out.MetricsProviders = *(*[]MetricsProvider)(unsafe.Pointer(&in.MetricsProviders))
out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds))
return nil
}

func autoConvert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in *DeschedulerProfile, out *api.DeschedulerProfile, s conversion.Scope) error {
out.Name = in.Name
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]api.PluginConfig, len(*in))
for i := range *in {
if err := Convert_v1alpha2_PluginConfig_To_api_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfigs = nil
}
if err := Convert_v1alpha2_Plugins_To_api_Plugins(&in.Plugins, &out.Plugins, s); err != nil {
return err
}
return nil
}

// Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile is an autogenerated conversion function.
func Convert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in *DeschedulerProfile, out *api.DeschedulerProfile, s conversion.Scope) error {
return autoConvert_v1alpha2_DeschedulerProfile_To_api_DeschedulerProfile(in, out, s)
}

func autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.DeschedulerProfile, out *DeschedulerProfile, s conversion.Scope) error {
out.Name = in.Name
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
if err := Convert_api_PluginConfig_To_v1alpha2_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.PluginConfigs = nil
}
if err := Convert_api_Plugins_To_v1alpha2_Plugins(&in.Plugins, &out.Plugins, s); err != nil {
return err
}
return nil
}

// Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile is an autogenerated conversion function.
func Convert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in *api.DeschedulerProfile, out *DeschedulerProfile, s conversion.Scope) error {
return autoConvert_api_DeschedulerProfile_To_v1alpha2_DeschedulerProfile(in, out, s)
}

func autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}

// Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector is an autogenerated conversion function.
func Convert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in *MetricsCollector, out *api.MetricsCollector, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsCollector_To_api_MetricsCollector(in, out, s)
}

func autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}

// Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector is an autogenerated conversion function.
func Convert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in *api.MetricsCollector, out *MetricsCollector, s conversion.Scope) error {
return autoConvert_api_MetricsCollector_To_v1alpha2_MetricsCollector(in, out, s)
}

func autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
out.Source = api.MetricsSource(in.Source)
out.Prometheus = (*api.Prometheus)(unsafe.Pointer(in.Prometheus))
return nil
}

// Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider is an autogenerated conversion function.
func Convert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in *MetricsProvider, out *api.MetricsProvider, s conversion.Scope) error {
return autoConvert_v1alpha2_MetricsProvider_To_api_MetricsProvider(in, out, s)
}

func autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
out.Source = MetricsSource(in.Source)
out.Prometheus = (*Prometheus)(unsafe.Pointer(in.Prometheus))
return nil
}

// Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider is an autogenerated conversion function.
func Convert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in *api.MetricsProvider, out *MetricsProvider, s conversion.Scope) error {
return autoConvert_api_MetricsProvider_To_v1alpha2_MetricsProvider(in, out, s)
}

func autoConvert_v1alpha2_PluginConfig_To_api_PluginConfig(in *PluginConfig, out *api.PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}

func autoConvert_api_PluginConfig_To_v1alpha2_PluginConfig(in *api.PluginConfig, out *PluginConfig, s conversion.Scope) error {
out.Name = in.Name
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil {
return err
}
return nil
}

// Convert_api_PluginConfig_To_v1alpha2_PluginConfig is an autogenerated conversion function.
func Convert_api_PluginConfig_To_v1alpha2_PluginConfig(in *api.PluginConfig, out *PluginConfig, s conversion.Scope) error {
return autoConvert_api_PluginConfig_To_v1alpha2_PluginConfig(in, out, s)
}

func autoConvert_v1alpha2_PluginSet_To_api_PluginSet(in *PluginSet, out *api.PluginSet, s conversion.Scope) error {
out.Enabled = *(*[]string)(unsafe.Pointer(&in.Enabled))
out.Disabled = *(*[]string)(unsafe.Pointer(&in.Disabled))
return nil
}

// Convert_v1alpha2_PluginSet_To_api_PluginSet is an autogenerated conversion function.
func Convert_v1alpha2_PluginSet_To_api_PluginSet(in *PluginSet, out *api.PluginSet, s conversion.Scope) error {
return autoConvert_v1alpha2_PluginSet_To_api_PluginSet(in, out, s)
}

func autoConvert_api_PluginSet_To_v1alpha2_PluginSet(in *api.PluginSet, out *PluginSet, s conversion.Scope) error {
out.Enabled = *(*[]string)(unsafe.Pointer(&in.Enabled))
out.Disabled = *(*[]string)(unsafe.Pointer(&in.Disabled))
return nil
}

// Convert_api_PluginSet_To_v1alpha2_PluginSet is an autogenerated conversion function.
func Convert_api_PluginSet_To_v1alpha2_PluginSet(in *api.PluginSet, out *PluginSet, s conversion.Scope) error {
return autoConvert_api_PluginSet_To_v1alpha2_PluginSet(in, out, s)
}

func autoConvert_v1alpha2_Plugins_To_api_Plugins(in *Plugins, out *api.Plugins, s conversion.Scope) error {
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.PreSort, &out.PreSort, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Sort, &out.Sort, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Deschedule, &out.Deschedule, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Balance, &out.Balance, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_v1alpha2_PluginSet_To_api_PluginSet(&in.PreEvictionFilter, &out.PreEvictionFilter, s); err != nil {
return err
}
return nil
}

// Convert_v1alpha2_Plugins_To_api_Plugins is an autogenerated conversion function.
func Convert_v1alpha2_Plugins_To_api_Plugins(in *Plugins, out *api.Plugins, s conversion.Scope) error {
return autoConvert_v1alpha2_Plugins_To_api_Plugins(in, out, s)
}

func autoConvert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.PreSort, &out.PreSort, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Sort, &out.Sort, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Deschedule, &out.Deschedule, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Balance, &out.Balance, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.Filter, &out.Filter, s); err != nil {
return err
}
if err := Convert_api_PluginSet_To_v1alpha2_PluginSet(&in.PreEvictionFilter, &out.PreEvictionFilter, s); err != nil {
return err
}
return nil
}

// Convert_api_Plugins_To_v1alpha2_Plugins is an autogenerated conversion function.
func Convert_api_Plugins_To_v1alpha2_Plugins(in *api.Plugins, out *Plugins, s conversion.Scope) error {
return autoConvert_api_Plugins_To_v1alpha2_Plugins(in, out, s)
}

func autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
out.URL = in.URL
out.AuthToken = (*api.AuthToken)(unsafe.Pointer(in.AuthToken))
return nil
}

// Convert_v1alpha2_Prometheus_To_api_Prometheus is an autogenerated conversion function.
func Convert_v1alpha2_Prometheus_To_api_Prometheus(in *Prometheus, out *api.Prometheus, s conversion.Scope) error {
return autoConvert_v1alpha2_Prometheus_To_api_Prometheus(in, out, s)
}

func autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
out.URL = in.URL
out.AuthToken = (*AuthToken)(unsafe.Pointer(in.AuthToken))
return nil
}

// Convert_api_Prometheus_To_v1alpha2_Prometheus is an autogenerated conversion function.
func Convert_api_Prometheus_To_v1alpha2_Prometheus(in *api.Prometheus, out *Prometheus, s conversion.Scope) error {
return autoConvert_api_Prometheus_To_v1alpha2_Prometheus(in, out, s)
}

func autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}

// Convert_v1alpha2_SecretReference_To_api_SecretReference is an autogenerated conversion function.
func Convert_v1alpha2_SecretReference_To_api_SecretReference(in *SecretReference, out *api.SecretReference, s conversion.Scope) error {
return autoConvert_v1alpha2_SecretReference_To_api_SecretReference(in, out, s)
}

func autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
out.Namespace = in.Namespace
out.Name = in.Name
return nil
}

// Convert_api_SecretReference_To_v1alpha2_SecretReference is an autogenerated conversion function.
func Convert_api_SecretReference_To_v1alpha2_SecretReference(in *api.SecretReference, out *SecretReference, s conversion.Scope) error {
return autoConvert_api_SecretReference_To_v1alpha2_SecretReference(in, out, s)
}
@@ -1,284 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha2

import (
runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
*out = *in
if in.SecretReference != nil {
in, out := &in.SecretReference, &out.SecretReference
*out = new(SecretReference)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
if in == nil {
return nil
}
out := new(AuthToken)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = new(string)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
if in.MetricsCollector != nil {
in, out := &in.MetricsCollector, &out.MetricsCollector
*out = new(MetricsCollector)
**out = **in
}
if in.MetricsProviders != nil {
in, out := &in.MetricsProviders, &out.MetricsProviders
*out = make([]MetricsProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerPolicy.
func (in *DeschedulerPolicy) DeepCopy() *DeschedulerPolicy {
if in == nil {
return nil
}
out := new(DeschedulerPolicy)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerProfile) DeepCopyInto(out *DeschedulerProfile) {
*out = *in
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerProfile.
func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
if in == nil {
return nil
}
out := new(DeschedulerProfile)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
*out = *in
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(Prometheus)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
if in == nil {
return nil
}
out := new(MetricsProvider)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
*out = *in
in.Args.DeepCopyInto(&out.Args)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
func (in *PluginConfig) DeepCopy() *PluginConfig {
if in == nil {
return nil
}
out := new(PluginConfig)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PluginSet) DeepCopyInto(out *PluginSet) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Disabled != nil {
in, out := &in.Disabled, &out.Disabled
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
func (in *PluginSet) DeepCopy() *PluginSet {
if in == nil {
return nil
}
out := new(PluginSet)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugins) DeepCopyInto(out *Plugins) {
*out = *in
in.PreSort.DeepCopyInto(&out.PreSort)
in.Sort.DeepCopyInto(&out.Sort)
in.Deschedule.DeepCopyInto(&out.Deschedule)
in.Balance.DeepCopyInto(&out.Balance)
in.Filter.DeepCopyInto(&out.Filter)
in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
func (in *Plugins) DeepCopy() *Plugins {
if in == nil {
return nil
}
out := new(Plugins)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
*out = *in
if in.AuthToken != nil {
in, out := &in.AuthToken, &out.AuthToken
*out = new(AuthToken)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
if in == nil {
return nil
}
out := new(Prometheus)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -25,34 +25,13 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthToken) DeepCopyInto(out *AuthToken) {
*out = *in
if in.SecretReference != nil {
in, out := &in.SecretReference, &out.SecretReference
*out = new(SecretReference)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthToken.
func (in *AuthToken) DeepCopy() *AuthToken {
if in == nil {
return nil
}
out := new(AuthToken)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Profiles != nil {
in, out := &in.Profiles, &out.Profiles
*out = make([]DeschedulerProfile, len(*in))
*out = make([]Profile, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
@@ -72,33 +51,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictTotal != nil {
in, out := &in.MaxNoOfPodsToEvictTotal, &out.MaxNoOfPodsToEvictTotal
*out = new(uint)
**out = **in
}
if in.EvictionFailureEventNotification != nil {
in, out := &in.EvictionFailureEventNotification, &out.EvictionFailureEventNotification
*out = new(bool)
**out = **in
}
if in.MetricsCollector != nil {
in, out := &in.MetricsCollector, &out.MetricsCollector
*out = new(MetricsCollector)
**out = **in
}
if in.MetricsProviders != nil {
in, out := &in.MetricsProviders, &out.MetricsProviders
*out = make([]MetricsProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.GracePeriodSeconds != nil {
in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
*out = new(int64)
**out = **in
}
return
}

@@ -120,88 +72,6 @@ func (in *DeschedulerPolicy) DeepCopyObject() runtime.Object {
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeschedulerProfile) DeepCopyInto(out *DeschedulerProfile) {
*out = *in
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeschedulerProfile.
func (in *DeschedulerProfile) DeepCopy() *DeschedulerProfile {
if in == nil {
return nil
}
out := new(DeschedulerProfile)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EvictionLimits) DeepCopyInto(out *EvictionLimits) {
*out = *in
if in.Node != nil {
in, out := &in.Node, &out.Node
*out = new(uint)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvictionLimits.
func (in *EvictionLimits) DeepCopy() *EvictionLimits {
if in == nil {
return nil
}
out := new(EvictionLimits)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsCollector) DeepCopyInto(out *MetricsCollector) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsCollector.
func (in *MetricsCollector) DeepCopy() *MetricsCollector {
if in == nil {
return nil
}
out := new(MetricsCollector)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsProvider) DeepCopyInto(out *MetricsProvider) {
*out = *in
if in.Prometheus != nil {
in, out := &in.Prometheus, &out.Prometheus
*out = new(Prometheus)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsProvider.
func (in *MetricsProvider) DeepCopy() *MetricsProvider {
if in == nil {
return nil
}
out := new(MetricsProvider)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -280,6 +150,7 @@ func (in *Plugins) DeepCopyInto(out *Plugins) {
in.Sort.DeepCopyInto(&out.Sort)
in.Deschedule.DeepCopyInto(&out.Deschedule)
in.Balance.DeepCopyInto(&out.Balance)
in.Evict.DeepCopyInto(&out.Evict)
in.Filter.DeepCopyInto(&out.Filter)
in.PreEvictionFilter.DeepCopyInto(&out.PreEvictionFilter)
return
@@ -317,22 +188,25 @@ func (in *PriorityThreshold) DeepCopy() *PriorityThreshold {
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Prometheus) DeepCopyInto(out *Prometheus) {
func (in *Profile) DeepCopyInto(out *Profile) {
*out = *in
if in.AuthToken != nil {
in, out := &in.AuthToken, &out.AuthToken
*out = new(AuthToken)
(*in).DeepCopyInto(*out)
if in.PluginConfigs != nil {
in, out := &in.PluginConfigs, &out.PluginConfigs
*out = make([]PluginConfig, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Plugins.DeepCopyInto(&out.Plugins)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus.
func (in *Prometheus) DeepCopy() *Prometheus {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile.
func (in *Profile) DeepCopy() *Profile {
if in == nil {
return nil
}
out := new(Prometheus)
out := new(Profile)
in.DeepCopyInto(out)
return out
}
@@ -358,19 +232,3 @@ func (in ResourceThresholds) DeepCopy() ResourceThresholds {
in.DeepCopyInto(out)
return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}
@@ -21,6 +21,7 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -33,7 +34,6 @@ type DeschedulerConfiguration struct {

// KubeconfigFile is path to kubeconfig file with authorization and master
// location information.
// Deprecated: Use clientConnection.kubeConfig instead.
KubeconfigFile string

// PolicyConfigFile is the filepath to the descheduler policy configuration.
@@ -51,41 +51,13 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool

// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods bool

// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool

// Tracing specifies the options for tracing.
Tracing TracingConfiguration

// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration

// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration
}

type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, tracing will be used NoopTraceProvider.
CollectorEndpoint string
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, provider will start in insecure mode.
TransportCert string
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, tracing will be used default namespace.
ServiceNamespace string
// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
// not sample anything. Everything else is a percentage value.
SampleRate float64
// FallbackToNoOpProviderOnError can be set in case if you want your trace provider to fallback to
// no op provider in case if the configured end point based provider can't be setup.
FallbackToNoOpProviderOnError bool
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration
}

@@ -21,6 +21,7 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
componentbaseconfig "k8s.io/component-base/config"
registry "k8s.io/component-base/logs/api/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -33,7 +34,6 @@ type DeschedulerConfiguration struct {

// KubeconfigFile is path to kubeconfig file with authorization and master
// location information.
// Deprecated: Use clientConnection.kubeConfig instead.
KubeconfigFile string `json:"kubeconfigFile"`

// PolicyConfigFile is the filepath to the descheduler policy configuration.
@@ -51,41 +51,13 @@ type DeschedulerConfiguration struct {
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods bool `json:"evictLocalStoragePods,omitempty"`

// EvictDaemonSetPods allows pods owned by a DaemonSet resource to be evicted.
EvictDaemonSetPods bool `json:"evictDaemonSetPods,omitempty"`

// IgnorePVCPods sets whether PVC pods should be allowed to be evicted
IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`

// Tracing is used to setup the required OTEL tracing configuration
Tracing TracingConfiguration `json:"tracing,omitempty"`

// LeaderElection starts Deployment using leader election loop
LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

// ClientConnection specifies the kubeconfig file and client connection settings to use when communicating with the apiserver.
// Refer to [ClientConnection](https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/componentconfig#ClientConnectionConfiguration) for more information.
ClientConnection componentbaseconfig.ClientConnectionConfiguration `json:"clientConnection,omitempty"`
}

type TracingConfiguration struct {
// CollectorEndpoint is the address of the OpenTelemetry collector.
// If not specified, tracing will be used NoopTraceProvider.
CollectorEndpoint string `json:"collectorEndpoint"`
// TransportCert is the path to the certificate file for the OpenTelemetry collector.
// If not specified, provider will start in insecure mode.
TransportCert string `json:"transportCert,omitempty"`
// ServiceName is the name of the service to be used in the OpenTelemetry collector.
// If not specified, the default value is "descheduler".
ServiceName string `json:"serviceName,omitempty"`
// ServiceNamespace is the namespace of the service to be used in the OpenTelemetry collector.
// If not specified, tracing will be used default namespace.
ServiceNamespace string `json:"serviceNamespace,omitempty"`
// SampleRate is used to configure the sample rate of the OTEL trace collection. This value will
// be used as the Base value with sample ratio. A value >= 1.0 will sample everything and < 0 will
// not sample anything. Everything else is a percentage value.
SampleRate float64 `json:"sampleRate"`
// FallbackToNoOpProviderOnError can be set in case if you want your trace provider to fallback to
// no op provider in case if the configured end point based provider can't be setup.
FallbackToNoOpProviderOnError bool `json:"fallbackToNoOpProviderOnError"`
// Logging specifies the options of logging.
// Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/api/v1/options.go) for more information.
Logging registry.LoggingConfiguration `json:"logging,omitempty"`
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -46,16 +46,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*componentconfig.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(a.(*TracingConfiguration), b.(*componentconfig.TracingConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*componentconfig.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*componentconfig.TracingConfiguration), b.(*TracingConfiguration), scope)
}); err != nil {
return err
}
return nil
}

@@ -67,13 +57,9 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.EvictDaemonSetPods = in.EvictDaemonSetPods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.Logging = in.Logging
return nil
}

@@ -90,13 +76,9 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
out.NodeSelector = in.NodeSelector
out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
out.EvictLocalStoragePods = in.EvictLocalStoragePods
out.EvictDaemonSetPods = in.EvictDaemonSetPods
out.IgnorePVCPods = in.IgnorePVCPods
if err := Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(&in.Tracing, &out.Tracing, s); err != nil {
return err
}
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
out.Logging = in.Logging
return nil
}

@@ -104,33 +86,3 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
func Convert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in *componentconfig.DeschedulerConfiguration, out *DeschedulerConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_DeschedulerConfiguration(in, out, s)
}

func autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}

// Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in *TracingConfiguration, out *componentconfig.TracingConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_TracingConfiguration_To_componentconfig_TracingConfiguration(in, out, s)
}

func autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
out.CollectorEndpoint = in.CollectorEndpoint
out.TransportCert = in.TransportCert
out.ServiceName = in.ServiceName
out.ServiceNamespace = in.ServiceNamespace
out.SampleRate = in.SampleRate
out.FallbackToNoOpProviderOnError = in.FallbackToNoOpProviderOnError
return nil
}

// Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
func Convert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *componentconfig.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
return autoConvert_componentconfig_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
}
@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,9 +29,8 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
return
}

@@ -52,19 +51,3 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2025 The Kubernetes Authors.
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,9 +29,8 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
out.Tracing = in.Tracing
out.LeaderElection = in.LeaderElection
out.ClientConnection = in.ClientConnection
in.Logging.DeepCopyInto(&out.Logging)
return
}

@@ -52,19 +51,3 @@ func (in *DeschedulerConfiguration) DeepCopyObject() runtime.Object {
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
if in == nil {
return nil
}
out := new(TracingConfiguration)
in.DeepCopyInto(out)
return out
}
@@ -17,78 +17,41 @@ limitations under the License.
package client

import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"time"

promapi "github.com/prometheus/client_golang/api"
"github.com/prometheus/common/config"

// Ensure to load all auth plugins.
clientset "k8s.io/client-go/kubernetes"
// Ensure to load all auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/transport"
componentbaseconfig "k8s.io/component-base/config"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

var K8sPodCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"

func createConfig(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (*rest.Config, error) {
func CreateClient(kubeconfig, userAgt string) (clientset.Interface, error) {
var cfg *rest.Config
if len(clientConnection.Kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(clientConnection.Kubeconfig)
if len(kubeconfig) != 0 {
master, err := GetMasterFromKubeconfig(kubeconfig)
if err != nil {
return nil, fmt.Errorf("failed to parse kubeconfig file: %v ", err)
return nil, fmt.Errorf("Failed to parse kubeconfig file: %v ", err)
}

cfg, err = clientcmd.BuildConfigFromFlags(master, clientConnection.Kubeconfig)
cfg, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)
if err != nil {
return nil, fmt.Errorf("unable to build config: %v", err)
return nil, fmt.Errorf("Unable to build config: %v", err)
}

} else {
var err error
cfg, err = rest.InClusterConfig()
if err != nil {
return nil, fmt.Errorf("unable to build in cluster config: %v", err)
return nil, fmt.Errorf("Unable to build in cluster config: %v", err)
}
}

cfg.Burst = int(clientConnection.Burst)
cfg.QPS = clientConnection.QPS

if len(userAgt) != 0 {
cfg = rest.AddUserAgent(cfg, userAgt)
return clientset.NewForConfig(rest.AddUserAgent(cfg, userAgt))
} else {
return clientset.NewForConfig(cfg)
}

return cfg, nil
}

func CreateClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (clientset.Interface, error) {
cfg, err := createConfig(clientConnection, userAgt)
if err != nil {
return nil, fmt.Errorf("unable to create config: %v", err)
}

return clientset.NewForConfig(cfg)
}

func CreateMetricsClient(clientConnection componentbaseconfig.ClientConnectionConfiguration, userAgt string) (metricsclient.Interface, error) {
cfg, err := createConfig(clientConnection, userAgt)
if err != nil {
return nil, fmt.Errorf("unable to create config: %v", err)
}

// Create the metrics clientset to access the metrics.k8s.io API
return metricsclient.NewForConfig(cfg)
}

func GetMasterFromKubeconfig(filename string) (string, error) {
@@ -99,69 +62,11 @@ func GetMasterFromKubeconfig(filename string) (string, error) {

context, ok := config.Contexts[config.CurrentContext]
if !ok {
return "", fmt.Errorf("failed to get master address from kubeconfig: current context not found")
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}

if val, ok := config.Clusters[context.Cluster]; ok {
return val.Server, nil
}
return "", fmt.Errorf("failed to get master address from kubeconfig: cluster information not found")
}

func loadCAFile(filepath string) (*x509.CertPool, error) {
caCert, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, err
}

caCertPool := x509.NewCertPool()
if ok := caCertPool.AppendCertsFromPEM(caCert); !ok {
return nil, fmt.Errorf("failed to append CA certificate to the pool")
}

return caCertPool, nil
}

func CreatePrometheusClient(prometheusURL, authToken string) (promapi.Client, *http.Transport, error) {
// Retrieve Pod CA cert
caCertPool, err := loadCAFile(K8sPodCAFilePath)
if err != nil {
return nil, nil, fmt.Errorf("Error loading CA file: %v", err)
}

// Get Prometheus Host
u, err := url.Parse(prometheusURL)
if err != nil {
return nil, nil, fmt.Errorf("Error parsing prometheus URL: %v", err)
}
t := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: &tls.Config{
RootCAs: caCertPool,
ServerName: u.Host,
},
}
roundTripper := transport.NewBearerAuthRoundTripper(
authToken,
t,
)

if authToken != "" {
client, err := promapi.NewClient(promapi.Config{
Address: prometheusURL,
RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", config.NewInlineSecret(authToken), roundTripper),
})
return client, t, err
}
client, err := promapi.NewClient(promapi.Config{
Address: prometheusURL,
})
return client, t, err
return "", fmt.Errorf("Failed to get master address from kubeconfig")
}
@@ -19,40 +19,21 @@ package descheduler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
promapi "github.com/prometheus/client_golang/api"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
schedulingv1 "k8s.io/api/scheduling/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
corev1listers "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
schedulingv1 "k8s.io/client-go/listers/scheduling/v1"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
@@ -60,416 +41,25 @@ import (
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/metricscollector"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
frameworkprofile "sigs.k8s.io/descheduler/pkg/framework/profile"
|
||||
frameworktypes "sigs.k8s.io/descheduler/pkg/framework/types"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
"sigs.k8s.io/descheduler/pkg/framework"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
prometheusAuthTokenSecretKey = "prometheusAuthToken"
|
||||
workQueueKey = "key"
|
||||
)
|
||||
|
||||
type eprunner func(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status
|
||||
|
||||
type profileRunner struct {
|
||||
name string
|
||||
descheduleEPs, balanceEPs eprunner
|
||||
}
|
||||
|
||||
type descheduler struct {
|
||||
rs *options.DeschedulerServer
|
||||
ir *informerResources
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
namespacedSecretsLister corev1listers.SecretNamespaceLister
|
||||
deschedulerPolicy *api.DeschedulerPolicy
|
||||
eventRecorder events.EventRecorder
|
||||
podEvictor *evictions.PodEvictor
|
||||
podEvictionReactionFnc func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error)
|
||||
metricsCollector *metricscollector.MetricsCollector
|
||||
prometheusClient promapi.Client
|
||||
previousPrometheusClientTransport *http.Transport
|
||||
queue workqueue.RateLimitingInterface
|
||||
currentPrometheusAuthToken string
|
||||
metricsProviders map[api.MetricsSource]*api.MetricsProvider
|
||||
}
|
||||
|
||||
type informerResources struct {
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
resourceToInformer map[schema.GroupVersionResource]informers.GenericInformer
|
||||
}
|
||||
|
||||
func newInformerResources(sharedInformerFactory informers.SharedInformerFactory) *informerResources {
|
||||
return &informerResources{
|
||||
sharedInformerFactory: sharedInformerFactory,
|
||||
resourceToInformer: make(map[schema.GroupVersionResource]informers.GenericInformer),
|
||||
}
|
||||
}
|
||||
|
||||
func (ir *informerResources) Uses(resources ...schema.GroupVersionResource) error {
|
||||
for _, resource := range resources {
|
||||
informer, err := ir.sharedInformerFactory.ForResource(resource)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ir.resourceToInformer[resource] = informer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyTo Copy informer subscriptions to the new factory and objects to the fake client so that the backing caches are populated for when listers are used.
|
||||
func (ir *informerResources) CopyTo(fakeClient *fakeclientset.Clientset, newFactory informers.SharedInformerFactory) error {
|
||||
for resource, informer := range ir.resourceToInformer {
|
||||
_, err := newFactory.ForResource(resource)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting resource %s: %w", resource, err)
|
||||
}
|
||||
|
||||
objects, err := informer.Lister().List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing %s: %w", informer, err)
|
||||
}
|
||||
|
||||
for _, object := range objects {
|
||||
fakeClient.Tracker().Add(object)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func metricsProviderListToMap(providersList []api.MetricsProvider) map[api.MetricsSource]*api.MetricsProvider {
|
||||
providersMap := make(map[api.MetricsSource]*api.MetricsProvider)
|
||||
for _, provider := range providersList {
|
||||
providersMap[provider.Source] = &provider
|
||||
}
|
||||
return providersMap
|
||||
}
|
||||
|
||||
func newDescheduler(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, eventRecorder events.EventRecorder, sharedInformerFactory, namespacedSharedInformerFactory informers.SharedInformerFactory) (*descheduler, error) {
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
ir := newInformerResources(sharedInformerFactory)
|
||||
ir.Uses(v1.SchemeGroupVersion.WithResource("pods"),
|
||||
v1.SchemeGroupVersion.WithResource("nodes"),
|
||||
// Future work could be to let each plugin declare what type of resources it needs; that way dry runs would stay
|
||||
// consistent with the real runs without having to keep the list here in sync.
|
||||
v1.SchemeGroupVersion.WithResource("namespaces"), // Used by the defaultevictor plugin
|
||||
schedulingv1.SchemeGroupVersion.WithResource("priorityclasses"), // Used by the defaultevictor plugin
|
||||
policyv1.SchemeGroupVersion.WithResource("poddisruptionbudgets"), // Used by the defaultevictor plugin
|
||||
|
||||
) // Used by the defaultevictor plugin
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
podEvictor, err := evictions.NewPodEvictor(
|
||||
ctx,
|
||||
rs.Client,
|
||||
eventRecorder,
|
||||
podInformer,
|
||||
rs.DefaultFeatureGates,
|
||||
evictions.NewOptions().
|
||||
WithPolicyGroupVersion(evictionPolicyGroupVersion).
|
||||
WithMaxPodsToEvictPerNode(deschedulerPolicy.MaxNoOfPodsToEvictPerNode).
|
||||
WithMaxPodsToEvictPerNamespace(deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace).
|
||||
WithMaxPodsToEvictTotal(deschedulerPolicy.MaxNoOfPodsToEvictTotal).
|
||||
WithEvictionFailureEventNotification(deschedulerPolicy.EvictionFailureEventNotification).
|
||||
WithGracePeriodSeconds(deschedulerPolicy.GracePeriodSeconds).
|
||||
WithDryRun(rs.DryRun).
|
||||
WithMetricsEnabled(!rs.DisableMetrics),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
desch := &descheduler{
|
||||
rs: rs,
|
||||
ir: ir,
|
||||
getPodsAssignedToNode: getPodsAssignedToNode,
|
||||
sharedInformerFactory: sharedInformerFactory,
|
||||
deschedulerPolicy: deschedulerPolicy,
|
||||
eventRecorder: eventRecorder,
|
||||
podEvictor: podEvictor,
|
||||
podEvictionReactionFnc: podEvictionReactionFnc,
|
||||
prometheusClient: rs.PrometheusClient,
|
||||
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: "descheduler"}),
|
||||
metricsProviders: metricsProviderListToMap(deschedulerPolicy.MetricsProviders),
|
||||
}
|
||||
|
||||
if rs.MetricsClient != nil {
|
||||
nodeSelector := labels.Everything()
|
||||
if deschedulerPolicy.NodeSelector != nil {
|
||||
sel, err := labels.Parse(*deschedulerPolicy.NodeSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeSelector = sel
|
||||
}
|
||||
desch.metricsCollector = metricscollector.NewMetricsCollector(sharedInformerFactory.Core().V1().Nodes().Lister(), rs.MetricsClient, nodeSelector)
|
||||
}
|
||||
|
||||
prometheusProvider := desch.metricsProviders[api.PrometheusMetrics]
|
||||
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.AuthToken != nil {
|
||||
authTokenSecret := prometheusProvider.Prometheus.AuthToken.SecretReference
|
||||
if authTokenSecret == nil || authTokenSecret.Namespace == "" {
|
||||
return nil, fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
|
||||
}
|
||||
if namespacedSharedInformerFactory == nil {
|
||||
return nil, fmt.Errorf("namespacedSharedInformerFactory not configured")
|
||||
}
|
||||
namespacedSharedInformerFactory.Core().V1().Secrets().Informer().AddEventHandler(desch.eventHandler())
|
||||
desch.namespacedSecretsLister = namespacedSharedInformerFactory.Core().V1().Secrets().Lister().Secrets(authTokenSecret.Namespace)
|
||||
}
|
||||
|
||||
return desch, nil
|
||||
}
|
||||
|
||||
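// reconcileInClusterSAToken recreates the Prometheus client whenever the in-cluster
// service account token changes. Outside of a cluster it is a no-op.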
func (d *descheduler) reconcileInClusterSAToken() error {
|
||||
// Read the SA token and assume it has sufficient permissions to authenticate
|
||||
cfg, err := rest.InClusterConfig()
|
||||
if err == nil {
|
||||
if d.currentPrometheusAuthToken != cfg.BearerToken {
|
||||
klog.V(2).Infof("Creating Prometheus client (with SA token)")
|
||||
prometheusClient, transport, err := client.CreatePrometheusClient(d.metricsProviders[api.PrometheusMetrics].Prometheus.URL, cfg.BearerToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create a prometheus client: %v", err)
|
||||
}
|
||||
d.prometheusClient = prometheusClient
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = transport
|
||||
d.currentPrometheusAuthToken = cfg.BearerToken
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err == rest.ErrNotInCluster {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unexpected error when reading in cluster config: %v", err)
|
||||
}
|
||||
|
||||
func (d *descheduler) runAuthenticationSecretReconciler(ctx context.Context) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer d.queue.ShutDown()
|
||||
|
||||
klog.Infof("Starting authentication secret reconciler")
|
||||
defer klog.Infof("Shutting down authentication secret reconciler")
|
||||
|
||||
go wait.UntilWithContext(ctx, d.runAuthenticationSecretReconcilerWorker, time.Second)
|
||||
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func (d *descheduler) runAuthenticationSecretReconcilerWorker(ctx context.Context) {
|
||||
for d.processNextWorkItem(ctx) {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *descheduler) processNextWorkItem(ctx context.Context) bool {
|
||||
dsKey, quit := d.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer d.queue.Done(dsKey)
|
||||
|
||||
err := d.sync()
|
||||
if err == nil {
|
||||
d.queue.Forget(dsKey)
|
||||
return true
|
||||
}
|
||||
|
||||
utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err))
|
||||
d.queue.AddRateLimited(dsKey)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
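// sync reads the Prometheus authentication token from the referenced secret and
// recreates the Prometheus client when the token changes. If the secret is gone,
// the cached client and token are cleared.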
func (d *descheduler) sync() error {
|
||||
prometheusConfig := d.metricsProviders[api.PrometheusMetrics].Prometheus
|
||||
if prometheusConfig == nil || prometheusConfig.AuthToken == nil || prometheusConfig.AuthToken.SecretReference == nil {
|
||||
return fmt.Errorf("prometheus metrics source configuration is missing authentication token secret")
|
||||
}
|
||||
ns := prometheusConfig.AuthToken.SecretReference.Namespace
|
||||
name := prometheusConfig.AuthToken.SecretReference.Name
|
||||
secretObj, err := d.namespacedSecretsLister.Get(name)
|
||||
if err != nil {
|
||||
// clear the token if the secret is not found
|
||||
if apierrors.IsNotFound(err) {
|
||||
d.currentPrometheusAuthToken = ""
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = nil
|
||||
d.prometheusClient = nil
|
||||
}
|
||||
return fmt.Errorf("unable to get %v/%v secret", ns, name)
|
||||
}
|
||||
authToken := string(secretObj.Data[prometheusAuthTokenSecretKey])
|
||||
if authToken == "" {
|
||||
return fmt.Errorf("prometheus authentication token secret missing %q data or empty", prometheusAuthTokenSecretKey)
|
||||
}
|
||||
if d.currentPrometheusAuthToken == authToken {
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(2).Infof("authentication secret token updated, recreating prometheus client")
|
||||
prometheusClient, transport, err := client.CreatePrometheusClient(prometheusConfig.URL, authToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create a prometheus client: %v", err)
|
||||
}
|
||||
d.prometheusClient = prometheusClient
|
||||
if d.previousPrometheusClientTransport != nil {
|
||||
d.previousPrometheusClientTransport.CloseIdleConnections()
|
||||
}
|
||||
d.previousPrometheusClientTransport = transport
|
||||
d.currentPrometheusAuthToken = authToken
|
||||
return nil
|
||||
}
|
||||
|
||||
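// eventHandler enqueues a reconciliation whenever a watched secret is added, updated or deleted.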
func (d *descheduler) eventHandler() cache.ResourceEventHandler {
|
||||
return cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
|
||||
UpdateFunc: func(old, new interface{}) { d.queue.Add(workQueueKey) },
|
||||
DeleteFunc: func(obj interface{}) { d.queue.Add(workQueueKey) },
|
||||
}
|
||||
}
|
||||
|
||||
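// runDeschedulerLoop runs a single descheduling cycle. In dry-run mode the relevant
// objects are first copied into a fake client and informer factory so evictions are only
// simulated; the pod evictor counters are reset before running all profiles.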
func (d *descheduler) runDeschedulerLoop(ctx context.Context, nodes []*v1.Node) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "runDeschedulerLoop")
|
||||
defer span.End()
|
||||
defer func(loopStartDuration time.Time) {
|
||||
metrics.DeschedulerLoopDuration.With(map[string]string{}).Observe(time.Since(loopStartDuration).Seconds())
|
||||
}(time.Now())
|
||||
|
||||
// if len is still <= 1 error out
|
||||
if len(nodes) <= 1 {
|
||||
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
|
||||
return fmt.Errorf("the cluster size is 0 or 1")
|
||||
}
|
||||
|
||||
var client clientset.Interface
|
||||
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as when evicting pods for real.
|
||||
if d.rs.DryRun {
|
||||
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
|
||||
// Create a new cache so we start from scratch without any leftovers
|
||||
fakeClient := fakeclientset.NewSimpleClientset()
|
||||
// simulate a pod eviction by deleting a pod
|
||||
fakeClient.PrependReactor("create", "pods", d.podEvictionReactionFnc(fakeClient))
|
||||
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
|
||||
err := d.ir.CopyTo(fakeClient, fakeSharedInformerFactory)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a new instance of the shared informer factory from the cached client and
// register the pod informer; otherwise it will never start running.
|
||||
d.getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
|
||||
if err != nil {
|
||||
return fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
fakeCtx, cncl := context.WithCancel(context.TODO())
|
||||
defer cncl()
|
||||
fakeSharedInformerFactory.Start(fakeCtx.Done())
|
||||
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
|
||||
|
||||
client = fakeClient
|
||||
d.sharedInformerFactory = fakeSharedInformerFactory
|
||||
} else {
|
||||
client = d.rs.Client
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Setting up the pod evictor")
|
||||
d.podEvictor.SetClient(client)
|
||||
d.podEvictor.ResetCounters()
|
||||
|
||||
d.runProfiles(ctx, client, nodes)
|
||||
|
||||
klog.V(1).InfoS("Number of evictions/requests", "totalEvicted", d.podEvictor.TotalEvicted(), "evictionRequests", d.podEvictor.TotalEvictionRequests())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runProfiles runs all the deschedule plugins of all profiles first and then runs
// all the balance plugins of all profiles (all balance plugins must run after all deschedule plugins).
// See https://github.com/kubernetes-sigs/descheduler/issues/979
|
||||
func (d *descheduler) runProfiles(ctx context.Context, client clientset.Interface, nodes []*v1.Node) {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "runProfiles")
|
||||
defer span.End()
|
||||
var profileRunners []profileRunner
|
||||
for _, profile := range d.deschedulerPolicy.Profiles {
|
||||
currProfile, err := frameworkprofile.NewProfile(
|
||||
profile,
|
||||
pluginregistry.PluginRegistry,
|
||||
frameworkprofile.WithClientSet(client),
|
||||
frameworkprofile.WithSharedInformerFactory(d.sharedInformerFactory),
|
||||
frameworkprofile.WithPodEvictor(d.podEvictor),
|
||||
frameworkprofile.WithGetPodsAssignedToNodeFnc(d.getPodsAssignedToNode),
|
||||
frameworkprofile.WithMetricsCollector(d.metricsCollector),
|
||||
frameworkprofile.WithPrometheusClient(d.prometheusClient),
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to create a profile", "profile", profile.Name)
|
||||
continue
|
||||
}
|
||||
profileRunners = append(profileRunners, profileRunner{profile.Name, currProfile.RunDeschedulePlugins, currProfile.RunBalancePlugins})
|
||||
}
|
||||
|
||||
for _, profileR := range profileRunners {
|
||||
// First deschedule
|
||||
status := profileR.descheduleEPs(ctx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
span.AddEvent("failed to perform deschedule operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.DescheduleOperation)))
|
||||
klog.ErrorS(status.Err, "running deschedule extension point failed with error", "profile", profileR.name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
for _, profileR := range profileRunners {
|
||||
// Balance Later
|
||||
status := profileR.balanceEPs(ctx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
span.AddEvent("failed to perform balance operations", trace.WithAttributes(attribute.String("err", status.Err.Error()), attribute.String("profile", profileR.name), attribute.String("operation", tracing.BalanceOperation)))
|
||||
klog.ErrorS(status.Err, "running balance extension point failed with error", "profile", profileR.name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "Run")
|
||||
defer span.End()
|
||||
metrics.Register()
|
||||
|
||||
clientConnection := rs.ClientConnection
|
||||
if rs.KubeconfigFile != "" && clientConnection.Kubeconfig == "" {
|
||||
clientConnection.Kubeconfig = rs.KubeconfigFile
|
||||
}
|
||||
rsclient, eventClient, err := createClients(clientConnection)
|
||||
rsclient, eventClient, err := createClients(rs.KubeconfigFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs.Client = rsclient
|
||||
rs.EventClient = eventClient
|
||||
|
||||
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginregistry.PluginRegistry)
|
||||
deschedulerPolicy, err := LoadPolicyConfig(rs.PolicyConfigFile, rs.Client, pluginbuilder.PluginRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -477,40 +67,25 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
return fmt.Errorf("deschedulerPolicy is nil")
|
||||
}
|
||||
|
||||
// Add k8s compatibility warnings to logs
|
||||
if err := validateVersionCompatibility(rs.Client.Discovery(), version.Get()); err != nil {
|
||||
klog.Warning(err.Error())
|
||||
}
|
||||
|
||||
evictionPolicyGroupVersion, err := eutils.SupportEviction(rs.Client)
|
||||
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
if (deschedulerPolicy.MetricsCollector != nil && deschedulerPolicy.MetricsCollector.Enabled) || metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.KubernetesMetrics] != nil {
|
||||
metricsClient, err := client.CreateMetricsClient(clientConnection, "descheduler")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rs.MetricsClient = metricsClient
|
||||
}
|
||||
|
||||
runFn := func() error {
|
||||
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
|
||||
}
|
||||
|
||||
if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
|
||||
span.AddEvent("Validation Failure", trace.WithAttributes(attribute.String("err", "leaderElection must be used with deschedulingInterval")))
|
||||
return fmt.Errorf("leaderElection must be used with deschedulingInterval")
|
||||
}
|
||||
|
||||
if rs.LeaderElection.LeaderElect && rs.DryRun {
|
||||
klog.V(1).Info("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
|
||||
klog.V(1).InfoS("Warning: DryRun is set to True. You need to disable it to use Leader Election.")
|
||||
}
|
||||
|
||||
if rs.LeaderElection.LeaderElect && !rs.DryRun {
|
||||
if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
|
||||
span.AddEvent("Leader Election Failure", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
return fmt.Errorf("leaderElection: %w", err)
|
||||
}
|
||||
return nil
|
||||
@@ -519,38 +94,16 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
|
||||
return runFn()
|
||||
}
|
||||
|
||||
func validateVersionCompatibility(discovery discovery.DiscoveryInterface, deschedulerVersionInfo version.Info) error {
|
||||
kubeServerVersionInfo, err := discovery.ServerVersion()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to discover Kubernetes server version: %v", err)
|
||||
}
|
||||
|
||||
kubeServerVersion, err := utilversion.ParseSemantic(kubeServerVersionInfo.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse Kubernetes server version '%s': %v", kubeServerVersionInfo.String(), err)
|
||||
}
|
||||
|
||||
deschedulerMinor, err := strconv.ParseFloat(deschedulerVersionInfo.Minor, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert Descheduler minor version '%s' to float: %v", deschedulerVersionInfo.Minor, err)
|
||||
}
|
||||
|
||||
kubeServerMinor := float64(kubeServerVersion.Minor())
|
||||
if math.Abs(deschedulerMinor-kubeServerMinor) > 3 {
|
||||
return fmt.Errorf(
|
||||
"descheduler version %s.%s may not be supported on your version of Kubernetes %v."+
|
||||
"See compatibility docs for more info: https://github.com/kubernetes-sigs/descheduler#compatibility-matrix",
|
||||
deschedulerVersionInfo.Major,
|
||||
deschedulerVersionInfo.Minor,
|
||||
kubeServerVersionInfo.String(),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return func(action core.Action) (bool, runtime.Object, error) {
|
||||
func cachedClient(
|
||||
realClient clientset.Interface,
|
||||
podLister listersv1.PodLister,
|
||||
nodeLister listersv1.NodeLister,
|
||||
namespaceLister listersv1.NamespaceLister,
|
||||
priorityClassLister schedulingv1.PriorityClassLister,
|
||||
) (clientset.Interface, error) {
|
||||
fakeClient := fakeclientset.NewSimpleClientset()
|
||||
// simulate a pod eviction by deleting a pod
|
||||
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
if !matched {
|
||||
@@ -567,23 +120,132 @@ func podEvictionReactionFnc(fakeClient *fakeclientset.Clientset) func(action cor
|
||||
}
|
||||
// fallback to the default reactor
|
||||
return false, nil, nil
|
||||
})
|
||||
|
||||
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
|
||||
pods, err := podLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list pods: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range pods {
|
||||
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return nil, fmt.Errorf("unable to copy pod: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
nodes, err := nodeLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list nodes: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range nodes {
|
||||
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return nil, fmt.Errorf("unable to copy node: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
namespaces, err := namespaceLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list namespaces: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range namespaces {
|
||||
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return nil, fmt.Errorf("unable to copy node: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
priorityClasses, err := priorityClassLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
|
||||
}
|
||||
|
||||
for _, item := range priorityClasses {
|
||||
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
|
||||
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return fakeClient, nil
|
||||
}
|
||||
|
||||
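// tokenReconciliation describes how the Prometheus authentication token is kept up to
// date: not at all, from the in-cluster service account token, or from a referenced secret.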
type tokenReconciliation int
|
||||
// evictorImpl implements the Evictor interface so plugins
|
||||
// can evict a pod without importing a specific pod evictor
|
||||
type evictorImpl struct {
|
||||
podEvictor *evictions.PodEvictor
|
||||
evictorFilter framework.EvictorPlugin
|
||||
}
|
||||
|
||||
const (
|
||||
noReconciliation tokenReconciliation = iota
|
||||
inClusterReconciliation
|
||||
secretReconciliation
|
||||
)
|
||||
var _ framework.Evictor = &evictorImpl{}
|
||||
|
||||
// Filter checks if a pod can be evicted
|
||||
func (ei *evictorImpl) Filter(pod *v1.Pod) bool {
|
||||
return ei.evictorFilter.Filter(pod)
|
||||
}
|
||||
|
||||
// PreEvictionFilter checks if pod can be evicted right before eviction
|
||||
func (ei *evictorImpl) PreEvictionFilter(pod *v1.Pod) bool {
|
||||
return ei.evictorFilter.PreEvictionFilter(pod)
|
||||
}
|
||||
|
||||
// Evict evicts a pod (no pre-check performed)
|
||||
func (ei *evictorImpl) Evict(ctx context.Context, pod *v1.Pod, opts evictions.EvictOptions) bool {
|
||||
return ei.podEvictor.EvictPod(ctx, pod, opts)
|
||||
}
|
||||
|
||||
func (ei *evictorImpl) NodeLimitExceeded(node *v1.Node) bool {
|
||||
return ei.podEvictor.NodeLimitExceeded(node)
|
||||
}
|
||||
|
||||
// handleImpl implements the framework handle which gets passed to plugins
|
||||
type handleImpl struct {
|
||||
clientSet clientset.Interface
|
||||
getPodsAssignedToNodeFunc podutil.GetPodsAssignedToNodeFunc
|
||||
sharedInformerFactory informers.SharedInformerFactory
|
||||
evictor *evictorImpl
|
||||
}
|
||||
|
||||
var _ framework.Handle = &handleImpl{}
|
||||
|
||||
// ClientSet retrieves kube client set
|
||||
func (hi *handleImpl) ClientSet() clientset.Interface {
|
||||
return hi.clientSet
|
||||
}
|
||||
|
||||
// GetPodsAssignedToNodeFunc retrieves GetPodsAssignedToNodeFunc implementation
|
||||
func (hi *handleImpl) GetPodsAssignedToNodeFunc() podutil.GetPodsAssignedToNodeFunc {
|
||||
return hi.getPodsAssignedToNodeFunc
|
||||
}
|
||||
|
||||
// SharedInformerFactory retrieves shared informer factory
|
||||
func (hi *handleImpl) SharedInformerFactory() informers.SharedInformerFactory {
|
||||
return hi.sharedInformerFactory
|
||||
}
|
||||
|
||||
// Evictor retrieves evictor so plugins can filter and evict pods
|
||||
func (hi *handleImpl) Evictor() framework.Evictor {
|
||||
return hi.evictor
|
||||
}
|
||||
|
||||
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string) error {
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "RunDeschedulerStrategies")
|
||||
defer span.End()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
podLister := sharedInformerFactory.Core().V1().Pods().Lister()
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
namespaceLister := sharedInformerFactory.Core().V1().Namespaces().Lister()
|
||||
priorityClassLister := sharedInformerFactory.Scheduling().V1().PriorityClasses().Lister()
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
return fmt.Errorf("build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
var nodeSelector string
|
||||
if deschedulerPolicy.NodeSelector != nil {
|
||||
@@ -596,88 +258,155 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
} else {
|
||||
eventClient = rs.Client
|
||||
}
|
||||
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, eventClient)
|
||||
defer eventBroadcaster.Shutdown()
|
||||
|
||||
var namespacedSharedInformerFactory informers.SharedInformerFactory
|
||||
metricProviderTokenReconciliation := noReconciliation
|
||||
|
||||
prometheusProvider := metricsProviderListToMap(deschedulerPolicy.MetricsProviders)[api.PrometheusMetrics]
|
||||
if prometheusProvider != nil && prometheusProvider.Prometheus != nil && prometheusProvider.Prometheus.URL != "" {
|
||||
if prometheusProvider.Prometheus.AuthToken != nil {
|
||||
// Will get reconciled
|
||||
namespacedSharedInformerFactory = informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields), informers.WithNamespace(prometheusProvider.Prometheus.AuthToken.SecretReference.Namespace))
|
||||
metricProviderTokenReconciliation = secretReconciliation
|
||||
} else {
|
||||
// Use the SA token and assume it has sufficient permissions to authenticate
|
||||
metricProviderTokenReconciliation = inClusterReconciliation
|
||||
}
|
||||
}
|
||||
|
||||
descheduler, err := newDescheduler(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, eventRecorder, sharedInformerFactory, namespacedSharedInformerFactory)
|
||||
if err != nil {
|
||||
span.AddEvent("Failed to create new descheduler", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
return err
|
||||
}
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
namespacedSharedInformerFactory.Start(ctx.Done())
|
||||
}
|
||||
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
descheduler.podEvictor.WaitForEventHandlersSync(ctx)
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
namespacedSharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
}
|
||||
|
||||
if descheduler.metricsCollector != nil {
|
||||
go func() {
|
||||
klog.V(2).Infof("Starting metrics collector")
|
||||
descheduler.metricsCollector.Run(ctx)
|
||||
klog.V(2).Infof("Stopped metrics collector")
|
||||
}()
|
||||
klog.V(2).Infof("Waiting for metrics collector to sync")
|
||||
if err := wait.PollWithContext(ctx, time.Second, time.Minute, func(context.Context) (done bool, err error) {
|
||||
return descheduler.metricsCollector.HasSynced(), nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("unable to wait for metrics collector to sync: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if metricProviderTokenReconciliation == secretReconciliation {
|
||||
go descheduler.runAuthenticationSecretReconciler(ctx)
|
||||
}
|
||||
|
||||
wait.NonSlidingUntil(func() {
|
||||
if metricProviderTokenReconciliation == inClusterReconciliation {
|
||||
// Read the SA token and assume it has sufficient permissions to authenticate
|
||||
if err := descheduler.reconcileInClusterSAToken(); err != nil {
|
||||
klog.ErrorS(err, "unable to reconcile an in cluster SA token")
|
||||
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeLister, nodeSelector)
|
||||
if err != nil {
|
||||
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if len(nodes) <= 1 {
|
||||
klog.V(1).InfoS("The cluster size is 0 or 1 meaning eviction causes service disruption or degradation. So aborting..")
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
|
||||
var podEvictorClient clientset.Interface
|
||||
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as when evicting pods for real.
|
||||
if rs.DryRun {
|
||||
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
|
||||
// Create a new cache so we start from scratch without any leftovers
|
||||
fakeClient, err := cachedClient(rs.Client, podLister, nodeLister, namespaceLister, priorityClassLister)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods().Informer())
|
||||
if err != nil {
|
||||
klog.Errorf("build get pods assigned to node function error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fakeCtx, cncl := context.WithCancel(context.TODO())
|
||||
defer cncl()
|
||||
fakeSharedInformerFactory.Start(fakeCtx.Done())
|
||||
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
|
||||
|
||||
podEvictorClient = fakeClient
|
||||
} else {
|
||||
podEvictorClient = rs.Client
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Building a pod evictor")
|
||||
podEvictor := evictions.NewPodEvictor(
|
||||
podEvictorClient,
|
||||
evictionPolicyGroupVersion,
|
||||
rs.DryRun,
|
||||
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
|
||||
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
|
||||
nodes,
|
||||
!rs.DisableMetrics,
|
||||
eventRecorder,
|
||||
)
|
||||
|
||||
var enabledDeschedulePlugins []framework.DeschedulePlugin
|
||||
var enabledBalancePlugins []framework.BalancePlugin
|
||||
|
||||
// Build plugins
|
||||
for _, profile := range deschedulerPolicy.Profiles {
|
||||
pc := getPluginConfig(defaultevictor.PluginName, profile.PluginConfigs)
|
||||
if pc == nil {
|
||||
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", defaultevictor.PluginName, "profile", profile.Name)
|
||||
continue
|
||||
}
|
||||
evictorFilter, err := defaultevictor.New(
|
||||
pc.Args,
|
||||
&handleImpl{
|
||||
clientSet: rs.Client,
|
||||
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
|
||||
sharedInformerFactory: sharedInformerFactory,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
klog.ErrorS(fmt.Errorf("unable to construct a plugin"), "skipping plugin", "plugin", defaultevictor.PluginName)
|
||||
continue
|
||||
}
|
||||
handle := &handleImpl{
|
||||
clientSet: rs.Client,
|
||||
getPodsAssignedToNodeFunc: getPodsAssignedToNode,
|
||||
sharedInformerFactory: sharedInformerFactory,
|
||||
evictor: &evictorImpl{
|
||||
podEvictor: podEvictor,
|
||||
evictorFilter: evictorFilter.(framework.EvictorPlugin),
|
||||
},
|
||||
}
|
||||
// Assuming only a list of enabled extension points.
|
||||
// Later, when a default list of plugins and their extension points is established,
|
||||
// compute the list of enabled extension points as (DefaultEnabled + Enabled - Disabled)
|
||||
for _, plugin := range append(profile.Plugins.Deschedule.Enabled, profile.Plugins.Balance.Enabled...) {
|
||||
pc := getPluginConfig(plugin, profile.PluginConfigs)
|
||||
if pc == nil {
|
||||
klog.ErrorS(fmt.Errorf("unable to get plugin config"), "skipping plugin", "plugin", plugin)
|
||||
continue
|
||||
}
|
||||
registryPlugin, ok := pluginbuilder.PluginRegistry[plugin]
|
||||
pgFnc := registryPlugin.PluginBuilder
|
||||
if !ok {
|
||||
klog.ErrorS(fmt.Errorf("unable to find plugin in the pluginsMap"), "skipping plugin", "plugin", plugin)
|
||||
}
|
||||
pg, err := pgFnc(pc.Args, handle)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "unable to initialize a plugin", "pluginName", plugin)
|
||||
}
|
||||
if pg != nil {
|
||||
switch v := pg.(type) {
|
||||
case framework.DeschedulePlugin:
|
||||
enabledDeschedulePlugins = append(enabledDeschedulePlugins, v)
|
||||
case framework.BalancePlugin:
|
||||
enabledBalancePlugins = append(enabledBalancePlugins, v)
|
||||
default:
|
||||
klog.ErrorS(fmt.Errorf("unknown plugin extension point"), "skipping plugin", "plugin", plugin)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A separate context is created here intentionally to avoid nesting the spans via context.
|
||||
sCtx, sSpan := tracing.Tracer().Start(ctx, "NonSlidingUntil")
|
||||
defer sSpan.End()
|
||||
// Execute extension points
|
||||
for _, pg := range enabledDeschedulePlugins {
|
||||
// TODO: strategyName should be accessible from within the strategy using a framework
|
||||
// handle or function which the Evictor has access to. For migration/in-progress framework
|
||||
// work, we are currently passing this via context. To be removed
|
||||
// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
|
||||
childCtx := context.WithValue(ctx, "strategyName", pg.Name())
|
||||
status := pg.Deschedule(childCtx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
|
||||
}
|
||||
}
|
||||
|
||||
nodes, err := nodeutil.ReadyNodes(sCtx, rs.Client, descheduler.sharedInformerFactory.Core().V1().Nodes().Lister(), nodeSelector)
|
||||
if err != nil {
|
||||
sSpan.AddEvent("Failed to detect ready nodes", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
klog.Error(err)
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
err = descheduler.runDeschedulerLoop(sCtx, nodes)
|
||||
if err != nil {
|
||||
sSpan.AddEvent("Failed to run descheduler loop", trace.WithAttributes(attribute.String("err", err.Error())))
|
||||
klog.Error(err)
|
||||
cancel()
|
||||
return
|
||||
for _, pg := range enabledBalancePlugins {
|
||||
// TODO: strategyName should be accessible from within the strategy using a framework
|
||||
// handle or function which the Evictor has access to. For migration/in-progress framework
|
||||
// work, we are currently passing this via context. To be removed
|
||||
// (See discussion thread https://github.com/kubernetes-sigs/descheduler/pull/885#discussion_r919962292)
|
||||
childCtx := context.WithValue(ctx, "strategyName", pg.Name())
|
||||
status := pg.Balance(childCtx, nodes)
|
||||
if status != nil && status.Err != nil {
|
||||
klog.ErrorS(status.Err, "plugin finished with error", "pluginName", pg.Name())
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(1).InfoS("Number of evicted pods", "totalEvicted", podEvictor.TotalEvicted())
|
||||
|
||||
// If no descheduling interval was specified, cancel the context to end the descheduling loop after one iteration
|
||||
if rs.DeschedulingInterval.Seconds() == 0 {
|
||||
cancel()
|
||||
@@ -687,32 +416,25 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) (*api.PluginConfig, int) {
|
||||
for idx, pluginConfig := range pluginConfigs {
|
||||
func getPluginConfig(pluginName string, pluginConfigs []api.PluginConfig) *api.PluginConfig {
|
||||
for _, pluginConfig := range pluginConfigs {
|
||||
if pluginConfig.Name == pluginName {
|
||||
return &pluginConfig, idx
|
||||
return &pluginConfig
|
||||
}
|
||||
}
|
||||
return nil, 0
|
||||
return nil
|
||||
}
|
||||
|
||||
func createClients(clientConnection componentbaseconfig.ClientConnectionConfiguration) (clientset.Interface, clientset.Interface, error) {
|
||||
kClient, err := client.CreateClient(clientConnection, "descheduler")
|
||||
func createClients(kubeconfig string) (clientset.Interface, clientset.Interface, error) {
|
||||
kClient, err := client.CreateClient(kubeconfig, "descheduler")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
eventClient, err := client.CreateClient(clientConnection, "")
|
||||
eventClient, err := client.CreateClient(kubeconfig, "")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return kClient, eventClient, nil
|
||||
}
|
||||
|
||||
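// trimManagedFields drops the managedFields metadata from objects before they are
// stored in the informer cache to reduce memory usage.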
func trimManagedFields(obj interface{}) (interface{}, error) {
|
||||
if accessor, err := meta.Accessor(obj); err == nil {
|
||||
accessor.SetManagedFields(nil)
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
@@ -2,221 +2,46 @@ package descheduler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
apiversion "k8s.io/apimachinery/pkg/version"
|
||||
fakediscovery "k8s.io/client-go/discovery/fake"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/pluginregistry"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/defaultevictor"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/nodeutilization"
|
||||
"sigs.k8s.io/descheduler/pkg/api/v1alpha1"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/pluginbuilder"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removeduplicates"
|
||||
"sigs.k8s.io/descheduler/pkg/framework/plugins/removepodsviolatingnodetaints"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
deschedulerversion "sigs.k8s.io/descheduler/pkg/version"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
var (
|
||||
podEvictionError = errors.New("PodEvictionError")
|
||||
tooManyRequestsError = &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Code: http.StatusTooManyRequests,
|
||||
Reason: metav1.StatusReasonTooManyRequests,
|
||||
Message: "admission webhook \"virt-launcher-eviction-interceptor.kubevirt.io\" denied the request: Eviction triggered evacuation of VMI",
|
||||
},
|
||||
}
|
||||
nodesgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
podsgvr = schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "pods"}
|
||||
)
|
||||
|
||||
func initFeatureGates() featuregate.FeatureGate {
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: false, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
return featureGates
|
||||
}
|
||||
|
||||
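// initPluginRegistry resets the global plugin registry and registers the plugins exercised in these tests.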
func initPluginRegistry() {
|
||||
pluginregistry.PluginRegistry = pluginregistry.NewRegistry()
|
||||
pluginregistry.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicates{}, &removeduplicates.RemoveDuplicatesArgs{}, removeduplicates.ValidateRemoveDuplicatesArgs, removeduplicates.SetDefaults_RemoveDuplicatesArgs, pluginregistry.PluginRegistry)
|
||||
pluginregistry.Register(defaultevictor.PluginName, defaultevictor.New, &defaultevictor.DefaultEvictor{}, &defaultevictor.DefaultEvictorArgs{}, defaultevictor.ValidateDefaultEvictorArgs, defaultevictor.SetDefaults_DefaultEvictorArgs, pluginregistry.PluginRegistry)
|
||||
pluginregistry.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaints{}, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, removepodsviolatingnodetaints.ValidateRemovePodsViolatingNodeTaintsArgs, removepodsviolatingnodetaints.SetDefaults_RemovePodsViolatingNodeTaintsArgs, pluginregistry.PluginRegistry)
|
||||
pluginregistry.Register(nodeutilization.LowNodeUtilizationPluginName, nodeutilization.NewLowNodeUtilization, &nodeutilization.LowNodeUtilization{}, &nodeutilization.LowNodeUtilizationArgs{}, nodeutilization.ValidateLowNodeUtilizationArgs, nodeutilization.SetDefaults_LowNodeUtilizationArgs, pluginregistry.PluginRegistry)
|
||||
}
|
||||
|
||||
func removePodsViolatingNodeTaintsPolicy() *api.DeschedulerPolicy {
|
||||
return &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "Profile",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: "RemovePodsViolatingNodeTaints",
|
||||
Args: &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{},
|
||||
},
|
||||
{
|
||||
Name: "DefaultEvictor",
|
||||
Args: &defaultevictor.DefaultEvictorArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{
|
||||
"DefaultEvictor",
|
||||
},
|
||||
},
|
||||
Deschedule: api.PluginSet{
|
||||
Enabled: []string{
|
||||
"RemovePodsViolatingNodeTaints",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func removeDuplicatesPolicy() *api.DeschedulerPolicy {
|
||||
return &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "Profile",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: "RemoveDuplicates",
|
||||
Args: &removeduplicates.RemoveDuplicatesArgs{},
|
||||
},
|
||||
{
|
||||
Name: "DefaultEvictor",
|
||||
Args: &defaultevictor.DefaultEvictorArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{
|
||||
"DefaultEvictor",
|
||||
},
|
||||
},
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{
|
||||
"RemoveDuplicates",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func lowNodeUtilizationPolicy(thresholds, targetThresholds api.ResourceThresholds, metricsEnabled bool) *api.DeschedulerPolicy {
|
||||
var metricsSource api.MetricsSource = ""
|
||||
if metricsEnabled {
|
||||
metricsSource = api.KubernetesMetrics
|
||||
}
|
||||
return &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{
|
||||
{
|
||||
Name: "Profile",
|
||||
PluginConfigs: []api.PluginConfig{
|
||||
{
|
||||
Name: nodeutilization.LowNodeUtilizationPluginName,
|
||||
Args: &nodeutilization.LowNodeUtilizationArgs{
|
||||
Thresholds: thresholds,
|
||||
TargetThresholds: targetThresholds,
|
||||
MetricsUtilization: &nodeutilization.MetricsUtilization{
|
||||
Source: metricsSource,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: defaultevictor.PluginName,
|
||||
Args: &defaultevictor.DefaultEvictorArgs{},
|
||||
},
|
||||
},
|
||||
Plugins: api.Plugins{
|
||||
Filter: api.PluginSet{
|
||||
Enabled: []string{
|
||||
defaultevictor.PluginName,
|
||||
},
|
||||
},
|
||||
Balance: api.PluginSet{
|
||||
Enabled: []string{
|
||||
nodeutilization.LowNodeUtilizationPluginName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func initDescheduler(t *testing.T, ctx context.Context, featureGates featuregate.FeatureGate, internalDeschedulerPolicy *api.DeschedulerPolicy, metricsClient metricsclient.Interface, objects ...runtime.Object) (*options.DeschedulerServer, *descheduler, *fakeclientset.Clientset) {
|
||||
client := fakeclientset.NewSimpleClientset(objects...)
|
||||
eventClient := fakeclientset.NewSimpleClientset(objects...)
|
||||
|
||||
rs, err := options.NewDeschedulerServer()
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to initialize server: %v", err)
|
||||
}
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DefaultFeatureGates = featureGates
|
||||
rs.MetricsClient = metricsClient
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactoryWithOptions(rs.Client, 0, informers.WithTransform(trimManagedFields))
|
||||
eventBroadcaster, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
|
||||
|
||||
descheduler, err := newDescheduler(ctx, rs, internalDeschedulerPolicy, "v1", eventRecorder, sharedInformerFactory, nil)
|
||||
if err != nil {
|
||||
eventBroadcaster.Shutdown()
|
||||
t.Fatalf("Unable to create a descheduler instance: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
return rs, descheduler, client
|
||||
}
|
||||
|
||||
func TestTaintsUpdated(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
|
||||
pluginbuilder.Register(removepodsviolatingnodetaints.PluginName, removepodsviolatingnodetaints.New, &removepodsviolatingnodetaints.RemovePodsViolatingNodeTaintsArgs{}, pluginbuilder.PluginRegistry)
|
||||
ctx := context.Background()
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
|
||||
p1 := test.BuildTestPod(fmt.Sprintf("pod_1_%s", n1.Name), 200, 0, n1.Name, nil)
|
||||
p1.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
p1.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
|
||||
{},
|
||||
}
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
eventClient := fakeclientset.NewSimpleClientset(n1, n2, p1)
|
||||
dp := &v1alpha1.DeschedulerPolicy{
|
||||
Strategies: v1alpha1.StrategyList{
|
||||
"RemovePodsViolatingNodeTaints": v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
rs, err := options.NewDeschedulerServer()
|
||||
if err != nil {
|
||||
@@ -224,7 +49,6 @@ func TestTaintsUpdated(t *testing.T) {
|
||||
}
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@@ -248,9 +72,14 @@ func TestTaintsUpdated(t *testing.T) {
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
|
||||
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
|
||||
|
||||
if err := RunDeschedulerStrategies(ctx, rs, removePodsViolatingNodeTaintsPolicy(), "v1"); err != nil {
|
||||
internalDeschedulerPolicy, err := V1alpha1ToInternal(client, dp, pluginbuilder.PluginRegistry)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
|
||||
}
|
||||
|
||||
if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
|
||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||
}
|
||||
|
||||
@@ -260,8 +89,8 @@ func TestTaintsUpdated(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDuplicate(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
pluginbuilder.PluginRegistry = pluginbuilder.NewRegistry()
|
||||
pluginbuilder.Register(removeduplicates.PluginName, removeduplicates.New, &removeduplicates.RemoveDuplicatesArgs{}, pluginbuilder.PluginRegistry)
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
@@ -280,6 +109,13 @@ func TestDuplicate(t *testing.T) {
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
|
||||
eventClient := fakeclientset.NewSimpleClientset(node1, node2, p1, p2, p3)
|
||||
dp := &v1alpha1.DeschedulerPolicy{
|
||||
Strategies: v1alpha1.StrategyList{
|
||||
"RemoveDuplicates": v1alpha1.DeschedulerStrategy{
|
||||
Enabled: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
rs, err := options.NewDeschedulerServer()
|
||||
if err != nil {
|
||||
@@ -287,7 +123,6 @@ func TestDuplicate(t *testing.T) {
|
||||
}
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
|
||||
pods, err := client.CoreV1().Pods(p1.Namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
@@ -299,14 +134,18 @@ func TestDuplicate(t *testing.T) {
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
|
||||
client.PrependReactor("create", "pods", podEvictionReactionFuc(&evictedPods))
|
||||
|
||||
if err := RunDeschedulerStrategies(ctx, rs, removeDuplicatesPolicy(), "v1"); err != nil {
|
||||
internalDeschedulerPolicy, err := V1alpha1ToInternal(client, dp, pluginbuilder.PluginRegistry)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to convert v1alpha1 to v1alpha2: %v", err)
|
||||
}
|
||||
if err := RunDeschedulerStrategies(ctx, rs, internalDeschedulerPolicy, "v1"); err != nil {
|
||||
t.Fatalf("Unable to run descheduler strategies: %v", err)
|
||||
}
|
||||
|
||||
if len(evictedPods) == 0 {
|
||||
t.Fatalf("Unable to evict pods\n")
|
||||
t.Fatalf("Unable to evict pod, node taint did not get propagated to descheduler strategies %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -317,7 +156,7 @@ func TestRootCancel(t *testing.T) {
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2)
|
||||
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
|
||||
dp := &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{}, // no strategies needed for this test
|
||||
Profiles: []api.Profile{}, // no strategies needed for this test
|
||||
}
|
||||
|
||||
rs, err := options.NewDeschedulerServer()
|
||||
@@ -327,7 +166,6 @@ func TestRootCancel(t *testing.T) {
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DeschedulingInterval = 100 * time.Millisecond
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
errChan := make(chan error, 1)
|
||||
defer close(errChan)
|
||||
|
||||
@@ -353,7 +191,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
||||
client := fakeclientset.NewSimpleClientset(n1, n2)
|
||||
eventClient := fakeclientset.NewSimpleClientset(n1, n2)
|
||||
dp := &api.DeschedulerPolicy{
|
||||
Profiles: []api.DeschedulerProfile{}, // no strategies needed for this test
|
||||
Profiles: []api.Profile{}, // no strategies needed for this test
|
||||
}
|
||||
|
||||
rs, err := options.NewDeschedulerServer()
|
||||
@@ -363,7 +201,6 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
||||
rs.Client = client
|
||||
rs.EventClient = eventClient
|
||||
rs.DeschedulingInterval = 0
|
||||
rs.DefaultFeatureGates = initFeatureGates()
|
||||
errChan := make(chan error, 1)
|
||||
defer close(errChan)
|
||||
|
||||
@@ -382,67 +219,7 @@ func TestRootCancelWithNoInterval(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateVersionCompatibility(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
deschedulerVersion deschedulerversion.Info
|
||||
serverVersion string
|
||||
expectError bool
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "no error when descheduler minor equals to server minor",
|
||||
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no error when descheduler minor is 3 behind server minor",
|
||||
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "23"},
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "no error when descheduler minor is 3 ahead of server minor",
|
||||
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "26"},
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "error when descheduler minor is 4 behind server minor",
|
||||
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "22"},
|
||||
serverVersion: "v1.26.1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "error when descheduler minor is 4 ahead of server minor",
|
||||
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "27"},
|
||||
serverVersion: "v1.23.1",
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "no error when using managed provider version",
|
||||
deschedulerVersion: deschedulerversion.Info{Major: "0", Minor: "25"},
|
||||
serverVersion: "v1.25.12-eks-2d98532",
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
client := fakeclientset.NewSimpleClientset()
|
||||
fakeDiscovery, _ := client.Discovery().(*fakediscovery.FakeDiscovery)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fakeDiscovery.FakedServerVersion = &apiversion.Info{GitVersion: tc.serverVersion}
|
||||
err := validateVersionCompatibility(fakeDiscovery, tc.deschedulerVersion)
|
||||
|
||||
hasError := err != nil
|
||||
if tc.expectError != hasError {
|
||||
t.Error("unexpected version compatibility behavior")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
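// podEvictionReactionTestingFnc returns a fake-client reactor that records the names of
// evicted pods and can optionally simulate evictions in background (HTTP 429 responses)
// or a fixed eviction error.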
func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackground func(podName string) bool, evictionErr error) func(action core.Action) (bool, runtime.Object, error) {
|
||||
func podEvictionReactionFuc(evictedPods *[]string) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
@@ -450,420 +227,9 @@ func podEvictionReactionTestingFnc(evictedPods *[]string, isEvictionsInBackgroun
|
||||
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
if isEvictionsInBackground != nil && isEvictionsInBackground(eviction.GetName()) {
|
||||
return true, nil, tooManyRequestsError
|
||||
}
|
||||
if evictionErr != nil {
|
||||
return true, nil, evictionErr
|
||||
}
|
||||
*evictedPods = append(*evictedPods, eviction.GetName())
|
||||
return true, nil, nil
|
||||
}
|
||||
}
|
||||
return false, nil, nil // fallback to the default reactor
|
||||
}
|
||||
}
|
||||
|
||||
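// taintNodeNoSchedule adds a NoSchedule taint to the given node.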
func taintNodeNoSchedule(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "key",
|
||||
Value: "value",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodEvictorReset(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePod)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePod)
|
||||
|
||||
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
rs, descheduler, client := initDescheduler(t, ctxCancel, initFeatureGates(), internalDeschedulerPolicy, nil, node1, node2, p1, p2)
|
||||
defer cancel()
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, nil, nil))
|
||||
|
||||
var fakeEvictedPods []string
|
||||
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, nil)
|
||||
}
|
||||
|
||||
// two pod evictions expected per descheduling cycle
|
||||
klog.Infof("2 pod eviction expected per a descheduling cycle, 2 real evictions in total")
|
||||
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 2 || len(fakeEvictedPods) != 0 {
|
||||
t.Fatalf("Expected (2,2,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
|
||||
}
|
||||
|
||||
// two pod evictions expected per descheduling cycle
|
||||
klog.Infof("2 pod eviction expected per a descheduling cycle, 4 real evictions in total")
|
||||
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 4 || len(fakeEvictedPods) != 0 {
|
||||
t.Fatalf("Expected (2,4,0) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
|
||||
}
|
||||
|
||||
// check the fake client syncing and the right pods evicted
|
||||
klog.Infof("Enabling the dry run mode")
|
||||
rs.DryRun = true
|
||||
evictedPods = []string{}
|
||||
|
||||
klog.Infof("2 pod eviction expected per a descheduling cycle, 2 fake evictions in total")
|
||||
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 2 {
|
||||
t.Fatalf("Expected (2,0,2) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
|
||||
}
|
||||
|
||||
klog.Infof("2 pod eviction expected per a descheduling cycle, 4 fake evictions in total")
|
||||
if err := descheduler.runDeschedulerLoop(ctx, nodes); err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
if descheduler.podEvictor.TotalEvicted() != 2 || len(evictedPods) != 0 || len(fakeEvictedPods) != 4 {
|
||||
t.Fatalf("Expected (2,0,4) pods evicted, got (%v, %v, %v) instead", descheduler.podEvictor.TotalEvicted(), len(evictedPods), len(fakeEvictedPods))
|
||||
}
|
||||
}
|
||||
|
||||
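// checkTotals asserts that the pod evictor reports the expected number of eviction requests and evictions.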
func checkTotals(t *testing.T, ctx context.Context, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
    if total := descheduler.podEvictor.TotalEvictionRequests(); total != totalEvictionRequests {
        t.Fatalf("Expected %v total eviction requests, got %v instead", totalEvictionRequests, total)
    }
    if total := descheduler.podEvictor.TotalEvicted(); total != totalEvicted {
        t.Fatalf("Expected %v total evictions, got %v instead", totalEvicted, total)
    }
    t.Logf("Total evictions: %v, total eviction requests: %v, total evictions and eviction requests: %v", totalEvicted, totalEvictionRequests, totalEvicted+totalEvictionRequests)
}

func runDeschedulingCycleAndCheckTotals(t *testing.T, ctx context.Context, nodes []*v1.Node, descheduler *descheduler, totalEvictionRequests, totalEvicted uint) {
    err := descheduler.runDeschedulerLoop(ctx, nodes)
    if err != nil {
        t.Fatalf("Unable to run a descheduling loop: %v", err)
    }
    checkTotals(t, ctx, descheduler, totalEvictionRequests, totalEvicted)
}
|
||||
|
||||
func TestEvictionRequestsCache(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
pod.Status.Phase = v1.PodRunning
|
||||
}
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
evictions.EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
|
||||
p5 := test.BuildTestPod("p5", 100, 0, node1.Name, updatePod)
|
||||
|
||||
internalDeschedulerPolicy := removePodsViolatingNodeTaintsPolicy()
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, internalDeschedulerPolicy, nil, node1, node2, p1, p2, p3, p4)
|
||||
defer cancel()
|
||||
|
||||
var fakeEvictedPods []string
|
||||
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
// No evicted pod is actually deleted on purpose so the test can run the descheduling cycle repeatedly
|
||||
// without recreating the pods.
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background got initiated")
|
||||
p2.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Another eviction in background got initiated")
|
||||
p1.Annotations[evictions.EvictionInProgressAnnotationKey] = ""
|
||||
if _, err := client.CoreV1().Pods(p1.Namespace).Update(context.TODO(), p1, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Repeat the same as previously to confirm no more evictions in background are requested")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 2, 2)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background completed")
|
||||
if err := client.CoreV1().Pods(p1.Namespace).Delete(context.TODO(), p1.Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 2)
|
||||
|
||||
klog.Infof("Scenario: A new pod without eviction in background added")
|
||||
if _, err := client.CoreV1().Pods(p5.Namespace).Create(context.TODO(), p5, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("unable to create a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions increased after running a descheduling cycle")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background canceled => eviction in progress annotation removed")
|
||||
delete(p2.Annotations, evictions.EvictionInProgressAnnotationKey)
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to update a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
checkTotals(t, ctx, descheduler, 0, 3)
|
||||
|
||||
klog.Infof("Scenario: Re-run the descheduling cycle to re-request eviction in background")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 1, 3)
|
||||
|
||||
klog.Infof("Scenario: Eviction in background completed with a pod in completed state")
|
||||
p2.Status.Phase = v1.PodSucceeded
|
||||
if _, err := client.CoreV1().Pods(p2.Namespace).Update(context.TODO(), p2, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("Check the number of evictions in background decreased")
|
||||
runDeschedulingCycleAndCheckTotals(t, ctx, nodes, descheduler, 0, 3)
|
||||
}
|
||||
|
||||
func TestDeschedulingLimits(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
policy *api.DeschedulerPolicy
|
||||
limit uint
|
||||
}{
|
||||
{
|
||||
description: "limits per node",
|
||||
policy: func() *api.DeschedulerPolicy {
|
||||
policy := removePodsViolatingNodeTaintsPolicy()
|
||||
policy.MaxNoOfPodsToEvictPerNode = utilptr.To[uint](4)
|
||||
return policy
|
||||
}(),
|
||||
limit: uint(4),
|
||||
},
|
||||
{
|
||||
description: "limits per namespace",
|
||||
policy: func() *api.DeschedulerPolicy {
|
||||
policy := removePodsViolatingNodeTaintsPolicy()
|
||||
policy.MaxNoOfPodsToEvictPerNamespace = utilptr.To[uint](4)
|
||||
return policy
|
||||
}(),
|
||||
limit: uint(4),
|
||||
},
|
||||
{
|
||||
description: "limits per cycle",
|
||||
policy: func() *api.DeschedulerPolicy {
|
||||
policy := removePodsViolatingNodeTaintsPolicy()
|
||||
policy.MaxNoOfPodsToEvictTotal = utilptr.To[uint](4)
|
||||
return policy
|
||||
}(),
|
||||
limit: uint(4),
|
||||
},
|
||||
}
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
evictions.EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
_, descheduler, client := initDescheduler(t, ctxCancel, featureGates, tc.policy, nil, node1, node2)
|
||||
defer cancel()
|
||||
|
||||
var fakeEvictedPods []string
|
||||
descheduler.podEvictionReactionFnc = func(*fakeclientset.Clientset) func(action core.Action) (bool, runtime.Object, error) {
|
||||
return podEvictionReactionTestingFnc(&fakeEvictedPods, nil, podEvictionError)
|
||||
}
|
||||
|
||||
var evictedPods []string
|
||||
client.PrependReactor("create", "pods", podEvictionReactionTestingFnc(&evictedPods, func(name string) bool { return name == "p1" || name == "p2" }, nil))
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
pods := []*v1.Pod{
|
||||
test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground),
|
||||
test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground),
|
||||
test.BuildTestPod("p3", 100, 0, node1.Name, updatePod),
|
||||
test.BuildTestPod("p4", 100, 0, node1.Name, updatePod),
|
||||
test.BuildTestPod("p5", 100, 0, node1.Name, updatePod),
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
rand.Shuffle(len(pods), func(i, j int) { pods[i], pods[j] = pods[j], pods[i] })
|
||||
func() {
|
||||
for j := 0; j < 5; j++ {
|
||||
idx := j
|
||||
if _, err := client.CoreV1().Pods(pods[idx].Namespace).Create(context.TODO(), pods[idx], metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("unable to create a pod: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.CoreV1().Pods(pods[idx].Namespace).Delete(context.TODO(), pods[idx].Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("unable to delete a pod: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
err := descheduler.runDeschedulerLoop(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
totalERs := descheduler.podEvictor.TotalEvictionRequests()
|
||||
totalEs := descheduler.podEvictor.TotalEvicted()
|
||||
if totalERs+totalEs > tc.limit {
|
||||
t.Fatalf("Expected %v evictions and eviction requests in total, got %v instead", tc.limit, totalERs+totalEs)
|
||||
}
|
||||
t.Logf("Total evictions and eviction requests: %v (er=%v, e=%v)", totalERs+totalEs, totalERs, totalEs)
|
||||
}()
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadAwareDescheduling(t *testing.T) {
|
||||
initPluginRegistry()
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, taintNodeNoSchedule)
|
||||
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
nodes := []*v1.Node{node1, node2}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 300, 0, node1.Name, updatePod)
|
||||
p2 := test.BuildTestPod("p2", 300, 0, node1.Name, updatePod)
|
||||
p3 := test.BuildTestPod("p3", 300, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 300, 0, node1.Name, updatePod)
|
||||
p5 := test.BuildTestPod("p5", 300, 0, node1.Name, updatePod)
|
||||
|
||||
nodemetricses := []*v1beta1.NodeMetrics{
|
||||
test.BuildNodeMetrics("n1", 2400, 3000),
|
||||
test.BuildNodeMetrics("n2", 400, 0),
|
||||
}
|
||||
|
||||
podmetricses := []*v1beta1.PodMetrics{
|
||||
test.BuildPodMetrics("p1", 400, 0),
|
||||
test.BuildPodMetrics("p2", 400, 0),
|
||||
test.BuildPodMetrics("p3", 400, 0),
|
||||
test.BuildPodMetrics("p4", 400, 0),
|
||||
test.BuildPodMetrics("p5", 400, 0),
|
||||
}
|
||||
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
for _, nodemetrics := range nodemetricses {
|
||||
metricsClientset.Tracker().Create(nodesgvr, nodemetrics, "")
|
||||
}
|
||||
for _, podmetrics := range podmetricses {
|
||||
metricsClientset.Tracker().Create(podsgvr, podmetrics, podmetrics.Namespace)
|
||||
}
|
||||
|
||||
policy := lowNodeUtilizationPolicy(
|
||||
api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
api.ResourceThresholds{
|
||||
v1.ResourceCPU: 50,
|
||||
v1.ResourcePods: 50,
|
||||
},
|
||||
true, // enabled metrics utilization
|
||||
)
|
||||
policy.MetricsProviders = []api.MetricsProvider{{Source: api.KubernetesMetrics}}
|
||||
|
||||
ctxCancel, cancel := context.WithCancel(ctx)
|
||||
_, descheduler, _ := initDescheduler(
|
||||
t,
|
||||
ctxCancel,
|
||||
initFeatureGates(),
|
||||
policy,
|
||||
metricsClientset,
|
||||
node1, node2, p1, p2, p3, p4, p5)
|
||||
defer cancel()
|
||||
|
||||
// This needs to be run since the metrics collector is started
|
||||
// after newDescheduler in RunDeschedulerStrategies.
|
||||
descheduler.metricsCollector.Collect(ctx)
|
||||
|
||||
err := descheduler.runDeschedulerLoop(ctx, nodes)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run a descheduling loop: %v", err)
|
||||
}
|
||||
totalEs := descheduler.podEvictor.TotalEvicted()
|
||||
if totalEs != 2 {
|
||||
t.Fatalf("Expected %v evictions in total, got %v instead", 2, totalEs)
|
||||
}
|
||||
t.Logf("Total evictions: %v", totalEs)
|
||||
}
|
||||
|
||||
@@ -1,45 +0,0 @@
package evictions

type EvictionNodeLimitError struct {
    node string
}

func (e EvictionNodeLimitError) Error() string {
    return "maximum number of evicted pods per node reached"
}

func NewEvictionNodeLimitError(node string) *EvictionNodeLimitError {
    return &EvictionNodeLimitError{
        node: node,
    }
}

var _ error = &EvictionNodeLimitError{}

type EvictionNamespaceLimitError struct {
    namespace string
}

func (e EvictionNamespaceLimitError) Error() string {
    return "maximum number of evicted pods per namespace reached"
}

func NewEvictionNamespaceLimitError(namespace string) *EvictionNamespaceLimitError {
    return &EvictionNamespaceLimitError{
        namespace: namespace,
    }
}

var _ error = &EvictionNamespaceLimitError{}

type EvictionTotalLimitError struct{}

func (e EvictionTotalLimitError) Error() string {
    return "maximum number of evicted pods per a descheduling cycle reached"
}

func NewEvictionTotalLimitError() *EvictionTotalLimitError {
    return &EvictionTotalLimitError{}
}

var _ error = &EvictionTotalLimitError{}
|
||||
@@ -19,184 +19,17 @@ package evictions
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/metrics"
|
||||
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/tracing"
|
||||
)
|
||||
|
||||
var (
    assumedEvictionRequestTimeoutSeconds uint          = 10 * 60 // 10 minutes
    evictionRequestsCacheResyncPeriod    time.Duration = 10 * time.Minute
    // syncedPollPeriod controls how often you look at the status of your sync funcs
    syncedPollPeriod = 100 * time.Millisecond
)
|
||||
|
||||
type evictionRequestItem struct {
    podName, podNamespace, podNodeName string
    evictionAssumed                    bool
    assumedTimestamp                   metav1.Time
}
|
||||
|
||||
type evictionRequestsCache struct {
    mu                           sync.RWMutex
    requests                     map[string]evictionRequestItem
    requestsPerNode              map[string]uint
    requestsPerNamespace         map[string]uint
    requestsTotal                uint
    assumedRequestTimeoutSeconds uint
}
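
// The per-node, per-namespace and total counters are kept in sync with the requests map
// by addPod, assumePod and deleteItem, so the read paths never have to iterate the map.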
|
||||
|
||||
func newEvictionRequestsCache(assumedRequestTimeoutSeconds uint) *evictionRequestsCache {
|
||||
return &evictionRequestsCache{
|
||||
requests: make(map[string]evictionRequestItem),
|
||||
requestsPerNode: make(map[string]uint),
|
||||
requestsPerNamespace: make(map[string]uint),
|
||||
assumedRequestTimeoutSeconds: assumedRequestTimeoutSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) run(ctx context.Context) {
|
||||
wait.UntilWithContext(ctx, erc.cleanCache, evictionRequestsCacheResyncPeriod)
|
||||
}
|
||||
|
||||
// cleanCache removes all assumed entries that have not been confirmed
// for more than the specified timeout
func (erc *evictionRequestsCache) cleanCache(ctx context.Context) {
    erc.mu.Lock()
    defer erc.mu.Unlock()
    klog.V(4).Infof("Cleaning cache of assumed eviction requests in background")
    for uid, item := range erc.requests {
        if item.evictionAssumed {
            requestAgeSeconds := uint(metav1.Now().Sub(item.assumedTimestamp.Local()).Seconds())
            if requestAgeSeconds > erc.assumedRequestTimeoutSeconds {
                klog.V(4).InfoS("Assumed eviction request in background timed out, deleting", "timeout", erc.assumedRequestTimeoutSeconds, "podNamespace", item.podNamespace, "podName", item.podName)
                erc.deleteItem(uid)
            }
        }
    }
}
|
||||
|
||||
func (erc *evictionRequestsCache) evictionRequestsPerNode(nodeName string) uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return erc.requestsPerNode[nodeName]
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) evictionRequestsPerNamespace(ns string) uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return erc.requestsPerNamespace[ns]
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) evictionRequestsTotal() uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return erc.requestsTotal
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) TotalEvictionRequests() uint {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
return uint(len(erc.requests))
|
||||
}
|
||||
|
||||
// getPodKey returns the string key of a pod.
func getPodKey(pod *v1.Pod) string {
    uid := string(pod.UID)
    // Every pod is expected to have the UID set.
    // When the descheduling framework is used for simulation,
    // user-created workloads may forget to set the UID.
    if len(uid) == 0 {
        panic(fmt.Errorf("cannot get cache key for %v/%v pod with empty UID", pod.Namespace, pod.Name))
    }
    return uid
}
|
||||
|
||||
func (erc *evictionRequestsCache) addPod(pod *v1.Pod) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
uid := getPodKey(pod)
|
||||
if _, exists := erc.requests[uid]; exists {
|
||||
return
|
||||
}
|
||||
erc.requests[uid] = evictionRequestItem{podNamespace: pod.Namespace, podName: pod.Name, podNodeName: pod.Spec.NodeName}
|
||||
erc.requestsPerNode[pod.Spec.NodeName]++
|
||||
erc.requestsPerNamespace[pod.Namespace]++
|
||||
erc.requestsTotal++
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) assumePod(pod *v1.Pod) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
uid := getPodKey(pod)
|
||||
if _, exists := erc.requests[uid]; exists {
|
||||
return
|
||||
}
|
||||
erc.requests[uid] = evictionRequestItem{
|
||||
podNamespace: pod.Namespace,
|
||||
podName: pod.Name,
|
||||
podNodeName: pod.Spec.NodeName,
|
||||
evictionAssumed: true,
|
||||
assumedTimestamp: metav1.NewTime(time.Now()),
|
||||
}
|
||||
erc.requestsPerNode[pod.Spec.NodeName]++
|
||||
erc.requestsPerNamespace[pod.Namespace]++
|
||||
erc.requestsTotal++
|
||||
}
|
||||
|
||||
// no locking, expected to be invoked from protected methods only
|
||||
func (erc *evictionRequestsCache) deleteItem(uid string) {
|
||||
erc.requestsPerNode[erc.requests[uid].podNodeName]--
|
||||
if erc.requestsPerNode[erc.requests[uid].podNodeName] == 0 {
|
||||
delete(erc.requestsPerNode, erc.requests[uid].podNodeName)
|
||||
}
|
||||
erc.requestsPerNamespace[erc.requests[uid].podNamespace]--
|
||||
if erc.requestsPerNamespace[erc.requests[uid].podNamespace] == 0 {
|
||||
delete(erc.requestsPerNamespace, erc.requests[uid].podNamespace)
|
||||
}
|
||||
erc.requestsTotal--
|
||||
delete(erc.requests, uid)
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) deletePod(pod *v1.Pod) {
|
||||
erc.mu.Lock()
|
||||
defer erc.mu.Unlock()
|
||||
uid := getPodKey(pod)
|
||||
if _, exists := erc.requests[uid]; exists {
|
||||
erc.deleteItem(uid)
|
||||
}
|
||||
}
|
||||
|
||||
func (erc *evictionRequestsCache) hasPod(pod *v1.Pod) bool {
|
||||
erc.mu.RLock()
|
||||
defer erc.mu.RUnlock()
|
||||
uid := getPodKey(pod)
|
||||
_, exists := erc.requests[uid]
|
||||
return exists
|
||||
}
|
||||
|
||||
var (
    EvictionRequestAnnotationKey    = "descheduler.alpha.kubernetes.io/request-evict-only"
    EvictionInProgressAnnotationKey = "descheduler.alpha.kubernetes.io/eviction-in-progress"
    EvictionInBackgroundErrorText   = "Eviction triggered evacuation"
)
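
// Illustrative example only (not part of the original file): a workload opts into
// evictions in background by setting the request-evict-only annotation on its pods.
// An external controller is then expected to mark the eviction as started via the
// eviction-in-progress annotation and eventually delete the pod. The pod below is
// hypothetical and shown purely to demonstrate how the annotation is set:
//
//	pod := &v1.Pod{
//		ObjectMeta: metav1.ObjectMeta{
//			Name:      "example",
//			Namespace: "dev",
//			Annotations: map[string]string{
//				EvictionRequestAnnotationKey: "",
//			},
//		},
//	}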
|
||||
|
||||
// nodePodEvictedCount keeps count of pods evicted on node
|
||||
@@ -206,372 +39,145 @@ type (
|
||||
)
|
||||
|
||||
type PodEvictor struct {
|
||||
mu sync.RWMutex
|
||||
client clientset.Interface
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
evictionFailureEventNotification bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
gracePeriodSeconds *int64
|
||||
nodePodCount nodePodEvictedCount
|
||||
namespacePodCount namespacePodEvictCount
|
||||
totalPodCount uint
|
||||
metricsEnabled bool
|
||||
eventRecorder events.EventRecorder
|
||||
erCache *evictionRequestsCache
|
||||
featureGates featuregate.FeatureGate
|
||||
|
||||
// registeredHandlers contains the registrations of all handlers. It's used to check if all handlers have finished syncing before the scheduling cycles start.
|
||||
registeredHandlers []cache.ResourceEventHandlerRegistration
|
||||
client clientset.Interface
|
||||
nodes []*v1.Node
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
nodepodCount nodePodEvictedCount
|
||||
namespacePodCount namespacePodEvictCount
|
||||
metricsEnabled bool
|
||||
eventRecorder events.EventRecorder
|
||||
}
|
||||
|
||||
func NewPodEvictor(
|
||||
ctx context.Context,
|
||||
client clientset.Interface,
|
||||
policyGroupVersion string,
|
||||
dryRun bool,
|
||||
maxPodsToEvictPerNode *uint,
|
||||
maxPodsToEvictPerNamespace *uint,
|
||||
nodes []*v1.Node,
|
||||
metricsEnabled bool,
|
||||
eventRecorder events.EventRecorder,
|
||||
podInformer cache.SharedIndexInformer,
|
||||
featureGates featuregate.FeatureGate,
|
||||
options *Options,
|
||||
) (*PodEvictor, error) {
|
||||
if options == nil {
|
||||
options = NewOptions()
|
||||
) *PodEvictor {
|
||||
nodePodCount := make(nodePodEvictedCount)
|
||||
namespacePodCount := make(namespacePodEvictCount)
|
||||
for _, node := range nodes {
|
||||
// Initialize podsEvicted till now with 0.
|
||||
nodePodCount[node.Name] = 0
|
||||
}
|
||||
|
||||
podEvictor := &PodEvictor{
|
||||
client: client,
|
||||
eventRecorder: eventRecorder,
|
||||
policyGroupVersion: options.policyGroupVersion,
|
||||
dryRun: options.dryRun,
|
||||
evictionFailureEventNotification: options.evictionFailureEventNotification,
|
||||
maxPodsToEvictPerNode: options.maxPodsToEvictPerNode,
|
||||
maxPodsToEvictPerNamespace: options.maxPodsToEvictPerNamespace,
|
||||
maxPodsToEvictTotal: options.maxPodsToEvictTotal,
|
||||
gracePeriodSeconds: options.gracePeriodSeconds,
|
||||
metricsEnabled: options.metricsEnabled,
|
||||
nodePodCount: make(nodePodEvictedCount),
|
||||
namespacePodCount: make(namespacePodEvictCount),
|
||||
featureGates: featureGates,
|
||||
return &PodEvictor{
|
||||
client: client,
|
||||
nodes: nodes,
|
||||
policyGroupVersion: policyGroupVersion,
|
||||
dryRun: dryRun,
|
||||
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
|
||||
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
|
||||
nodepodCount: nodePodCount,
|
||||
namespacePodCount: namespacePodCount,
|
||||
metricsEnabled: metricsEnabled,
|
||||
eventRecorder: eventRecorder,
|
||||
}
|
||||
|
||||
if featureGates.Enabled(features.EvictionsInBackground) {
|
||||
erCache := newEvictionRequestsCache(assumedEvictionRequestTimeoutSeconds)
|
||||
|
||||
handlerRegistration, err := podInformer.AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", obj)
|
||||
return
|
||||
}
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
if _, exists := pod.Annotations[EvictionInProgressAnnotationKey]; exists {
|
||||
// Ignore completed/succeeded or failed pods
|
||||
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
|
||||
klog.V(3).InfoS("Eviction in background detected. Adding pod to the cache.", "pod", klog.KObj(pod))
|
||||
erCache.addPod(pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
oldPod, ok := oldObj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert oldObj to *v1.Pod", "oldObj", oldObj)
|
||||
return
|
||||
}
|
||||
newPod, ok := newObj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert newObj to *v1.Pod", "newObj", newObj)
|
||||
return
|
||||
}
|
||||
// Ignore pods that are not subject to an eviction in background
|
||||
if _, exists := newPod.Annotations[EvictionRequestAnnotationKey]; !exists {
|
||||
if erCache.hasPod(newPod) {
|
||||
klog.V(3).InfoS("Pod with eviction in background lost annotation. Removing pod from the cache.", "pod", klog.KObj(newPod))
|
||||
}
|
||||
erCache.deletePod(newPod)
|
||||
return
|
||||
}
|
||||
// Remove completed/succeeded or failed pods from the cache
|
||||
if newPod.Status.Phase == v1.PodSucceeded || newPod.Status.Phase == v1.PodFailed {
|
||||
klog.V(3).InfoS("Pod with eviction in background completed. Removing pod from the cache.", "pod", klog.KObj(newPod))
|
||||
erCache.deletePod(newPod)
|
||||
return
|
||||
}
|
||||
// Ignore any pod that does not have eviction in progress
|
||||
if _, exists := newPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
|
||||
// In case EvictionInProgressAnnotationKey annotation is not present/removed
|
||||
// it's unclear whether the eviction was restarted or terminated.
|
||||
// If the eviction gets restarted the pod needs to be removed from the cache
|
||||
// to allow re-triggering the eviction.
|
||||
if _, exists := oldPod.Annotations[EvictionInProgressAnnotationKey]; !exists {
|
||||
return
|
||||
}
|
||||
// the annotation was removed -> remove the pod from the cache to allow
// requesting the eviction again. In case the eviction got restarted, requesting
// the eviction again is expected to be a no-op. In case the eviction
// got terminated with no retry, requesting a new eviction is a normal
// operation.
|
||||
klog.V(3).InfoS("Eviction in background canceled (annotation removed). Removing pod from the cache.", "annotation", EvictionInProgressAnnotationKey, "pod", klog.KObj(newPod))
|
||||
erCache.deletePod(newPod)
|
||||
return
|
||||
}
|
||||
// Pick up the eviction in progress
|
||||
if !erCache.hasPod(newPod) {
|
||||
klog.V(3).InfoS("Eviction in background detected. Updating the cache.", "pod", klog.KObj(newPod))
|
||||
}
|
||||
erCache.addPod(newPod)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
var pod *v1.Pod
|
||||
switch t := obj.(type) {
|
||||
case *v1.Pod:
|
||||
pod = t
|
||||
case cache.DeletedFinalStateUnknown:
|
||||
var ok bool
|
||||
pod, ok = t.Obj.(*v1.Pod)
|
||||
if !ok {
|
||||
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t.Obj)
|
||||
return
|
||||
}
|
||||
default:
|
||||
klog.ErrorS(nil, "Cannot convert to *v1.Pod", "obj", t)
|
||||
return
|
||||
}
|
||||
if erCache.hasPod(pod) {
|
||||
klog.V(3).InfoS("Pod with eviction in background deleted/evicted. Removing pod from the cache.", "pod", klog.KObj(pod))
|
||||
}
|
||||
erCache.deletePod(pod)
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to register event handler for pod evictor: %v", err)
|
||||
}
|
||||
|
||||
podEvictor.registeredHandlers = append(podEvictor.registeredHandlers, handlerRegistration)
|
||||
|
||||
go erCache.run(ctx)
|
||||
|
||||
podEvictor.erCache = erCache
|
||||
}
|
||||
|
||||
return podEvictor, nil
|
||||
}
|
||||
|
||||
// WaitForEventHandlersSync waits for EventHandlers to sync.
// It returns nil if the handlers synced successfully, or an error if the context is cancelled first.
|
||||
func (pe *PodEvictor) WaitForEventHandlersSync(ctx context.Context) error {
|
||||
return wait.PollUntilContextCancel(ctx, syncedPollPeriod, true, func(ctx context.Context) (done bool, err error) {
|
||||
for _, handler := range pe.registeredHandlers {
|
||||
if !handler.HasSynced() {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
// NodeEvicted gives a number of pods evicted for node
|
||||
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.nodePodCount[node.Name]
|
||||
return pe.nodepodCount[node.Name]
|
||||
}
|
||||
|
||||
// TotalEvicted gives a number of pods evicted through all nodes
|
||||
func (pe *PodEvictor) TotalEvicted() uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.totalPodCount
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) ResetCounters() {
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
pe.nodePodCount = make(nodePodEvictedCount)
|
||||
pe.namespacePodCount = make(namespacePodEvictCount)
|
||||
pe.totalPodCount = 0
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) SetClient(client clientset.Interface) {
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
pe.client = client
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsTotal() uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsTotal()
|
||||
} else {
|
||||
return 0
|
||||
var total uint
|
||||
for _, count := range pe.nodepodCount {
|
||||
total += count
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsPerNode(node string) uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsPerNode(node)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) evictionRequestsPerNamespace(ns string) uint {
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.evictionRequestsPerNamespace(ns)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) EvictionRequests(node *v1.Node) uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
return pe.evictionRequestsTotal()
|
||||
}
|
||||
|
||||
func (pe *PodEvictor) TotalEvictionRequests() uint {
|
||||
pe.mu.RLock()
|
||||
defer pe.mu.RUnlock()
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
return pe.erCache.TotalEvictionRequests()
|
||||
} else {
|
||||
return 0
|
||||
// NodeLimitExceeded checks if the number of evictions for a node was exceeded
|
||||
func (pe *PodEvictor) NodeLimitExceeded(node *v1.Node) bool {
|
||||
if pe.maxPodsToEvictPerNode != nil {
|
||||
return pe.nodepodCount[node.Name] == *pe.maxPodsToEvictPerNode
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// EvictOptions provides a handle for passing additional info to EvictPod
|
||||
type EvictOptions struct {
|
||||
// Reason allows for passing details about the specific eviction for logging.
|
||||
Reason string
|
||||
// ProfileName allows for passing details about profile for observability.
|
||||
ProfileName string
|
||||
// StrategyName allows for passing details about strategy for observability.
|
||||
StrategyName string
|
||||
}
|
||||
|
||||
// EvictPod evicts a pod while exercising eviction limits.
|
||||
// Returns nil when the pod is evicted on the server side.
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) error {
|
||||
if len(pod.UID) == 0 {
|
||||
klog.InfoS("Ignoring pod eviction due to missing UID", "pod", pod)
|
||||
return fmt.Errorf("Pod %v is missing UID", klog.KObj(pod))
|
||||
}
|
||||
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
// eviction in background requested
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
if pe.erCache.hasPod(pod) {
|
||||
klog.V(3).InfoS("Eviction in background already requested (ignoring)", "pod", klog.KObj(pod))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pe.mu.Lock()
|
||||
defer pe.mu.Unlock()
|
||||
|
||||
var span trace.Span
|
||||
ctx, span = tracing.Tracer().Start(ctx, "EvictPod", trace.WithAttributes(attribute.String("podName", pod.Name), attribute.String("podNamespace", pod.Namespace), attribute.String("reason", opts.Reason), attribute.String("operation", tracing.EvictOperation)))
|
||||
defer span.End()
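// Outstanding eviction requests in background count toward the limits checked below,
// together with the pods already evicted in this descheduling cycle.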
|
||||
|
||||
if pe.maxPodsToEvictTotal != nil && pe.totalPodCount+pe.evictionRequestsTotal()+1 > *pe.maxPodsToEvictTotal {
|
||||
err := NewEvictionTotalLimitError()
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictTotal)
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictTotal)
|
||||
}
|
||||
return err
|
||||
func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, opts EvictOptions) bool {
|
||||
// TODO: Replace context-propagated Strategy name with a defined framework handle for accessing Strategy info
|
||||
strategy := ""
|
||||
if ctx.Value("strategyName") != nil {
|
||||
strategy = ctx.Value("strategyName").(string)
|
||||
}
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
if pe.maxPodsToEvictPerNode != nil && pe.nodePodCount[pod.Spec.NodeName]+pe.evictionRequestsPerNode(pod.Spec.NodeName)+1 > *pe.maxPodsToEvictPerNode {
|
||||
err := NewEvictionNodeLimitError(pod.Spec.NodeName)
|
||||
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[pod.Spec.NodeName]+1 > *pe.maxPodsToEvictPerNode {
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNode)
|
||||
}
|
||||
return err
|
||||
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per node reached"), "limit", *pe.maxPodsToEvictPerNode, "node", pod.Spec.NodeName)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+pe.evictionRequestsPerNamespace(pod.Namespace)+1 > *pe.maxPodsToEvictPerNamespace {
|
||||
err := NewEvictionNamespaceLimitError(pod.Namespace)
|
||||
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": err.Error(), "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
}
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace, "pod", klog.KObj(pod))
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (%v)", pod.Spec.NodeName, *pe.maxPodsToEvictPerNamespace)
|
||||
}
|
||||
return err
|
||||
klog.ErrorS(fmt.Errorf("Maximum number of evicted pods per namespace reached"), "limit", *pe.maxPodsToEvictPerNamespace, "namespace", pod.Namespace)
|
||||
return false
|
||||
}
|
||||
|
||||
ignore, err := pe.evictPod(ctx, pod)
|
||||
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
|
||||
if err != nil {
|
||||
// err is used only for logging purposes
|
||||
span.AddEvent("Eviction Failed", trace.WithAttributes(attribute.String("node", pod.Spec.NodeName), attribute.String("err", err.Error())))
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", opts.Reason)
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
}
|
||||
if pe.evictionFailureEventNotification {
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeWarning, "EvictionFailed", "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler failed: %v", pod.Spec.NodeName, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if ignore {
|
||||
return nil
|
||||
return false
|
||||
}
|
||||
|
||||
if pod.Spec.NodeName != "" {
|
||||
pe.nodePodCount[pod.Spec.NodeName]++
|
||||
pe.nodepodCount[pod.Spec.NodeName]++
|
||||
}
|
||||
pe.namespacePodCount[pod.Namespace]++
|
||||
pe.totalPodCount++
|
||||
|
||||
if pe.metricsEnabled {
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": opts.StrategyName, "namespace": pod.Namespace, "node": pod.Spec.NodeName, "profile": opts.ProfileName}).Inc()
|
||||
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": pod.Spec.NodeName}).Inc()
|
||||
}
|
||||
|
||||
if pe.dryRun {
|
||||
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
|
||||
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
|
||||
} else {
|
||||
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", opts.StrategyName, "node", pod.Spec.NodeName, "profile", opts.ProfileName)
|
||||
klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", opts.Reason, "strategy", strategy, "node", pod.Spec.NodeName)
|
||||
reason := opts.Reason
|
||||
if len(reason) == 0 {
|
||||
reason = opts.StrategyName
|
||||
reason = strategy
|
||||
if len(reason) == 0 {
|
||||
reason = "NotSet"
|
||||
}
|
||||
}
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod eviction from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||
pe.eventRecorder.Eventf(pod, nil, v1.EventTypeNormal, reason, "Descheduled", "pod evicted from %v node by sigs.k8s.io/descheduler", pod.Spec.NodeName)
|
||||
}
|
||||
return nil
|
||||
return true
|
||||
}
|
||||
|
||||
// return (ignore, err)
|
||||
func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
|
||||
deleteOptions := &metav1.DeleteOptions{
|
||||
GracePeriodSeconds: pe.gracePeriodSeconds,
|
||||
}
|
||||
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
|
||||
deleteOptions := &metav1.DeleteOptions{}
|
||||
// GracePeriodSeconds ?
|
||||
eviction := &policy.Eviction{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: pe.policyGroupVersion,
|
||||
APIVersion: policyGroupVersion,
|
||||
Kind: eutils.EvictionKind,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -580,36 +186,13 @@ func (pe *PodEvictor) evictPod(ctx context.Context, pod *v1.Pod) (bool, error) {
|
||||
},
|
||||
DeleteOptions: deleteOptions,
|
||||
}
|
||||
err := pe.client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
if pe.featureGates.Enabled(features.EvictionsInBackground) {
|
||||
// eviction in background requested
|
||||
if _, exists := pod.Annotations[EvictionRequestAnnotationKey]; exists {
|
||||
// Simulating https://github.com/kubevirt/kubevirt/pull/11532/files#diff-059cc1fc09e8b469143348cc3aa80b40de987670e008fa18a6fe010061f973c9R77
|
||||
if apierrors.IsTooManyRequests(err) && strings.Contains(err.Error(), EvictionInBackgroundErrorText) {
|
||||
// Ignore eviction of any pod that's failed or completed.
// It can happen that an eviction in background ends up with the pod stuck in the completed state.
// Normally, any requested eviction is expected to end with the pod's deletion.
// However, some custom eviction policies may leave completed pods around,
// which would lead to all completed pods still being considered unfinished evictions in background.
|
||||
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
klog.V(3).InfoS("Ignoring eviction of a completed/failed pod", "pod", klog.KObj(pod))
|
||||
return true, nil
|
||||
}
|
||||
klog.V(3).InfoS("Eviction in background assumed", "pod", klog.KObj(pod))
|
||||
pe.erCache.assumePod(pod)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
err := client.PolicyV1().Evictions(eviction.Namespace).Evict(ctx, eviction)
|
||||
|
||||
if apierrors.IsTooManyRequests(err) {
|
||||
return false, fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
return fmt.Errorf("error when evicting pod (ignoring) %q: %v", pod.Name, err)
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
return fmt.Errorf("pod not found when evicting %q: %v", pod.Name, err)
|
||||
}
|
||||
return false, err
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -18,107 +18,54 @@ package evictions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policy "k8s.io/api/policy/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/features"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
const (
|
||||
notFoundText = "pod not found when evicting \"%s\": pods \"%s\" not found"
|
||||
tooManyRequests = "error when evicting pod (ignoring) \"%s\": Too many requests: too many requests"
|
||||
)
|
||||
|
||||
func initFeatureGates() featuregate.FeatureGate {
|
||||
featureGates := featuregate.NewFeatureGate()
|
||||
featureGates.Add(map[featuregate.Feature]featuregate.FeatureSpec{
|
||||
features.EvictionsInBackground: {Default: true, PreRelease: featuregate.Alpha},
|
||||
})
|
||||
return featureGates
|
||||
}
|
||||
|
||||
func TestEvictPod(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("node1", 1000, 2000, 9, nil)
|
||||
pod1 := test.BuildTestPod("p1", 400, 0, "node1", nil)
|
||||
tests := []struct {
|
||||
description string
|
||||
node *v1.Node
|
||||
evictedPod *v1.Pod
|
||||
pods []runtime.Object
|
||||
wantErr error
|
||||
pod *v1.Pod
|
||||
pods []v1.Pod
|
||||
want error
|
||||
}{
|
||||
{
|
||||
description: "test pod eviction - pod present",
|
||||
node: node1,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{pod1},
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*pod1},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
description: "test pod eviction - pod absent (not found error)",
|
||||
description: "test pod eviction - pod absent",
|
||||
node: node1,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
wantErr: fmt.Errorf(notFoundText, pod1.Name, pod1.Name),
|
||||
},
|
||||
{
|
||||
description: "test pod eviction - pod absent (too many requests error)",
|
||||
node: node1,
|
||||
evictedPod: pod1,
|
||||
pods: []runtime.Object{test.BuildTestPod("p2", 400, 0, "node1", nil), test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
wantErr: fmt.Errorf(tooManyRequests, pod1.Name),
|
||||
pod: pod1,
|
||||
pods: []v1.Pod{*test.BuildTestPod("p2", 400, 0, "node1", nil), *test.BuildTestPod("p3", 450, 0, "node1", nil)},
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fakeClient := fake.NewClientset(test.pods...)
|
||||
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, test.wantErr
|
||||
})
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := &events.FakeRecorder{}
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
NewOptions(),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
_, got := podEvictor.evictPod(ctx, test.evictedPod)
|
||||
if got != test.wantErr {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.evictedPod.Name, test.wantErr, got)
|
||||
}
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
return true, &v1.PodList{Items: test.pods}, nil
|
||||
})
|
||||
got := evictPod(ctx, fakeClient, test.pod, "v1")
|
||||
if got != test.want {
|
||||
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -166,319 +113,3 @@ func TestPodTypes(t *testing.T) {
|
||||
t.Errorf("Expected p1 to be a normal pod.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPodEvictor(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
pod1 := test.BuildTestPod("pod", 400, 0, "node", nil)
|
||||
type podEvictorTest struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
dryRun bool
|
||||
evictionFailureEventNotification *bool
|
||||
maxPodsToEvictTotal *uint
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
expectedNodeEvictions uint
|
||||
expectedTotalEvictions uint
|
||||
expectedError error
|
||||
// events is a slice of strings representing expected events.
|
||||
// Each string in the slice should follow the format: "EventType Reason Message".
|
||||
// - "Warning Failed processing failed"
|
||||
events []string
|
||||
}
|
||||
tests := []podEvictorTest{
|
||||
{
|
||||
description: "one eviction expected with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on total with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionTotalLimitError(),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: total eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNodeLimitError("node"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: node eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNamespaceLimitError("default"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: namespace eviction limit exceeded (0)"},
|
||||
},
|
||||
{
|
||||
description: "eviction error with eviction failure event notification",
|
||||
pod: pod1,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: fmt.Errorf("eviction error"),
|
||||
events: []string{"Warning EvictionFailed pod eviction from node node by sigs.k8s.io/descheduler failed: eviction error"},
|
||||
},
|
||||
{
|
||||
description: "eviction with dryRun with eviction failure event notification",
|
||||
pod: pod1,
|
||||
dryRun: true,
|
||||
evictionFailureEventNotification: utilptr.To[bool](true),
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
description: "one eviction expected without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
events: []string{"Normal NotSet pod eviction from node node by sigs.k8s.io/descheduler"},
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on total without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionTotalLimitError(),
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](0),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNodeLimitError("node"),
|
||||
},
|
||||
{
|
||||
description: "eviction limit exceeded on node without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](0),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: NewEvictionNamespaceLimitError("default"),
|
||||
},
|
||||
{
|
||||
description: "eviction error without eviction failure event notification",
|
||||
pod: pod1,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 0,
|
||||
expectedTotalEvictions: 0,
|
||||
expectedError: fmt.Errorf("eviction error"),
|
||||
},
|
||||
{
|
||||
description: "eviction without dryRun with eviction failure event notification",
|
||||
pod: pod1,
|
||||
dryRun: true,
|
||||
maxPodsToEvictTotal: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNode: utilptr.To[uint](1),
|
||||
maxPodsToEvictPerNamespace: utilptr.To[uint](1),
|
||||
expectedNodeEvictions: 1,
|
||||
expectedTotalEvictions: 1,
|
||||
expectedError: nil,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
fakeClient := fake.NewSimpleClientset(pod1)
|
||||
fakeClient.PrependReactor("create", "pods/eviction", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
return true, nil, test.expectedError
|
||||
})
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
eventRecorder := events.NewFakeRecorder(100)
|
||||
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
fakeClient,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
NewOptions().
|
||||
WithDryRun(test.dryRun).
|
||||
WithMaxPodsToEvictTotal(test.maxPodsToEvictTotal).
|
||||
WithMaxPodsToEvictPerNode(test.maxPodsToEvictPerNode).
|
||||
WithEvictionFailureEventNotification(test.evictionFailureEventNotification).
|
||||
WithMaxPodsToEvictPerNamespace(test.maxPodsToEvictPerNamespace),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
stubNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node"}}
|
||||
|
||||
if actualErr := podEvictor.EvictPod(ctx, test.pod, EvictOptions{}); actualErr != nil && actualErr.Error() != test.expectedError.Error() {
|
||||
t.Errorf("Expected error: %v, got: %v", test.expectedError, actualErr)
|
||||
}
|
||||
|
||||
if evictions := podEvictor.NodeEvicted(stubNode); evictions != test.expectedNodeEvictions {
|
||||
t.Errorf("Expected %d node evictions, got %d instead", test.expectedNodeEvictions, evictions)
|
||||
}
|
||||
|
||||
if evictions := podEvictor.TotalEvicted(); evictions != test.expectedTotalEvictions {
|
||||
t.Errorf("Expected %d total evictions, got %d instead", test.expectedTotalEvictions, evictions)
|
||||
}
|
||||
|
||||
// Assert that the events are correct.
|
||||
assertEqualEvents(t, test.events, eventRecorder.Events)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestEvictionRequestsCacheCleanup(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
|
||||
ownerRef1 := test.GetReplicaSetOwnerRefList()
|
||||
updatePod := func(pod *v1.Pod) {
|
||||
pod.Namespace = "dev"
|
||||
pod.ObjectMeta.OwnerReferences = ownerRef1
|
||||
}
|
||||
updatePodWithEvictionInBackground := func(pod *v1.Pod) {
|
||||
updatePod(pod)
|
||||
pod.Annotations = map[string]string{
|
||||
EvictionRequestAnnotationKey: "",
|
||||
}
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, updatePodWithEvictionInBackground)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, updatePod)
|
||||
p4 := test.BuildTestPod("p4", 100, 0, node1.Name, updatePod)
|
||||
|
||||
client := fakeclientset.NewSimpleClientset(node1, p1, p2, p3, p4)
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
_, eventRecorder := utils.GetRecorderAndBroadcaster(ctx, client)
|
||||
|
||||
podEvictor, err := NewPodEvictor(
|
||||
ctx,
|
||||
client,
|
||||
eventRecorder,
|
||||
sharedInformerFactory.Core().V1().Pods().Informer(),
|
||||
initFeatureGates(),
|
||||
nil,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error when creating a pod evictor: %v", err)
|
||||
}
|
||||
|
||||
client.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
|
||||
if action.GetSubresource() == "eviction" {
|
||||
createAct, matched := action.(core.CreateActionImpl)
|
||||
if !matched {
|
||||
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
|
||||
}
|
||||
if eviction, matched := createAct.Object.(*policy.Eviction); matched {
|
||||
podName := eviction.GetName()
|
||||
if podName == "p1" || podName == "p2" {
|
||||
return true, nil, &apierrors.StatusError{
|
||||
ErrStatus: metav1.Status{
|
||||
Reason: metav1.StatusReasonTooManyRequests,
|
||||
Message: "Eviction triggered evacuation",
|
||||
},
|
||||
}
|
||||
}
|
||||
return true, nil, nil
|
||||
}
|
||||
}
|
||||
return false, nil, nil
|
||||
})
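// The reactor above is how this test simulates the eviction-in-background
// protocol: p1 and p2 carry the EvictionRequestAnnotationKey annotation and
// their eviction calls are answered with StatusReasonTooManyRequests, so the
// evictor records them as pending eviction requests instead of completed
// evictions, while p3 and p4 are evicted normally.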
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
podEvictor.EvictPod(ctx, p1, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p2, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p3, EvictOptions{})
|
||||
podEvictor.EvictPod(ctx, p4, EvictOptions{})
|
||||
|
||||
klog.Infof("2 evictions in background expected, 2 normal evictions")
|
||||
if total := podEvictor.TotalEvictionRequests(); total != 2 {
|
||||
t.Fatalf("Expected %v total eviction requests, got %v instead", 2, total)
|
||||
}
|
||||
if total := podEvictor.TotalEvicted(); total != 2 {
|
||||
t.Fatalf("Expected %v total evictions, got %v instead", 2, total)
|
||||
}
|
||||
|
||||
klog.Infof("2 evictions in background assumed. Wait for few seconds and check the assumed requests timed out")
|
||||
time.Sleep(2 * time.Second)
|
||||
klog.Infof("Checking the assumed requests timed out and were deleted")
|
||||
// Set the timeout to 1s so the cleaning can be tested
|
||||
podEvictor.erCache.assumedRequestTimeoutSeconds = 1
|
||||
podEvictor.erCache.cleanCache(ctx)
|
||||
if totalERs := podEvictor.TotalEvictionRequests(); totalERs > 0 {
|
||||
t.Fatalf("Expected 0 eviction requests, got %v instead", totalERs)
|
||||
}
|
||||
}
|
||||
|
||||
func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) {
|
||||
t.Logf("Assert for events: %v", expected)
|
||||
c := time.After(wait.ForeverTestTimeout)
|
||||
for _, e := range expected {
|
||||
select {
|
||||
case a := <-actual:
|
||||
if !reflect.DeepEqual(a, e) {
|
||||
t.Errorf("Expected event %q, got %q instead", e, a)
|
||||
}
|
||||
case <-c:
|
||||
t.Errorf("Expected event %q, got nothing", e)
|
||||
// continue iterating to print all expected events
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case a := <-actual:
|
||||
t.Errorf("Unexpected event: %q", a)
|
||||
default:
|
||||
return // No more events, as expected.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
package evictions
|
||||
|
||||
import (
|
||||
policy "k8s.io/api/policy/v1"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
policyGroupVersion string
|
||||
dryRun bool
|
||||
maxPodsToEvictPerNode *uint
|
||||
maxPodsToEvictPerNamespace *uint
|
||||
maxPodsToEvictTotal *uint
|
||||
evictionFailureEventNotification bool
|
||||
metricsEnabled bool
|
||||
gracePeriodSeconds *int64
|
||||
}
|
||||
|
||||
// NewOptions returns an Options with default values.
|
||||
func NewOptions() *Options {
|
||||
return &Options{
|
||||
policyGroupVersion: policy.SchemeGroupVersion.String(),
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Options) WithPolicyGroupVersion(policyGroupVersion string) *Options {
|
||||
o.policyGroupVersion = policyGroupVersion
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithDryRun(dryRun bool) *Options {
|
||||
o.dryRun = dryRun
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMaxPodsToEvictPerNode(maxPodsToEvictPerNode *uint) *Options {
|
||||
o.maxPodsToEvictPerNode = maxPodsToEvictPerNode
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMaxPodsToEvictPerNamespace(maxPodsToEvictPerNamespace *uint) *Options {
|
||||
o.maxPodsToEvictPerNamespace = maxPodsToEvictPerNamespace
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMaxPodsToEvictTotal(maxPodsToEvictTotal *uint) *Options {
|
||||
o.maxPodsToEvictTotal = maxPodsToEvictTotal
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithGracePeriodSeconds(gracePeriodSeconds *int64) *Options {
|
||||
o.gracePeriodSeconds = gracePeriodSeconds
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithMetricsEnabled(metricsEnabled bool) *Options {
|
||||
o.metricsEnabled = metricsEnabled
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Options) WithEvictionFailureEventNotification(evictionFailureEventNotification *bool) *Options {
|
||||
if evictionFailureEventNotification != nil {
|
||||
o.evictionFailureEventNotification = *evictionFailureEventNotification
|
||||
}
|
||||
return o
|
||||
}
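// A minimal usage sketch (illustrative values; mirrors how the pod evictor
// tests configure the evictor elsewhere in this change):
//
//	opts := NewOptions().
//		WithDryRun(true).
//		WithMaxPodsToEvictTotal(utilptr.To[uint](5)).
//		WithMaxPodsToEvictPerNamespace(utilptr.To[uint](2))
//
// NewOptions only presets policyGroupVersion; every other field stays at its
// zero value unless one of the With* builders is called.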
|
||||
@@ -1,152 +0,0 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metricscollector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
listercorev1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
utilptr "k8s.io/utils/ptr"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
)
|
||||
|
||||
const (
|
||||
beta float64 = 0.9
|
||||
)
|
||||
|
||||
type MetricsCollector struct {
|
||||
nodeLister listercorev1.NodeLister
|
||||
metricsClientset metricsclient.Interface
|
||||
nodeSelector labels.Selector
|
||||
|
||||
nodes map[string]api.ReferencedResourceList
|
||||
|
||||
mu sync.RWMutex
|
||||
// hasSynced signals at least one sync succeeded
|
||||
hasSynced bool
|
||||
}
|
||||
|
||||
func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset metricsclient.Interface, nodeSelector labels.Selector) *MetricsCollector {
|
||||
return &MetricsCollector{
|
||||
nodeLister: nodeLister,
|
||||
metricsClientset: metricsClientset,
|
||||
nodeSelector: nodeSelector,
|
||||
nodes: make(map[string]api.ReferencedResourceList),
|
||||
}
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) Run(ctx context.Context) {
|
||||
wait.NonSlidingUntil(func() {
|
||||
mc.Collect(ctx)
|
||||
}, 5*time.Second, ctx.Done())
|
||||
}
|
||||
|
||||
// During experiments, rounding to an integer causes weightedAverage to never
// reach the target value even when weightedAverage is applied many times in a
// row; the difference between the limit and the computed average settles
// within 5 units. Nevertheless, the value is expected to change over time, so
// the weighted average never gets a chance to fully converge, which makes the
// computed error negligible.
// The speed of convergence depends on how often the metrics collector
// syncs with the current value. Currently, the interval is set to 5s.
|
||||
func weightedAverage(prevValue, value int64) int64 {
|
||||
return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
|
||||
}
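// A minimal worked sketch of the formula above, using the values exercised in
// TestMetricsCollector (illustrative only): with beta = 0.9, a previous value
// of 1400 and a new reading of 500 give 0.9*1400 + 0.1*500 = 1310, and a
// subsequent reading of 900 gives 0.9*1310 + 0.1*900 = 1269. Each sync moves
// (1-beta) of the remaining distance toward the latest sample.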
|
||||
|
||||
func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
allNodesUsage := make(map[string]api.ReferencedResourceList)
|
||||
for nodeName := range mc.nodes {
|
||||
allNodesUsage[nodeName] = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
|
||||
}
|
||||
}
|
||||
|
||||
return allNodesUsage, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
|
||||
return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
|
||||
}
|
||||
return api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) HasSynced() bool {
|
||||
return mc.hasSynced
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) MetricsClient() metricsclient.Interface {
|
||||
return mc.metricsClientset
|
||||
}
|
||||
|
||||
func (mc *MetricsCollector) Collect(ctx context.Context) error {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
nodes, err := mc.nodeLister.List(mc.nodeSelector)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nodes: %v", err)
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
metrics, err := mc.metricsClientset.MetricsV1beta1().NodeMetricses().Get(ctx, node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error fetching metrics", "node", node.Name)
|
||||
// No entry -> duplicate the previous value -> do nothing as beta*PV + (1-beta)*PV = PV
|
||||
continue
|
||||
}
|
||||
|
||||
if _, exists := mc.nodes[node.Name]; !exists {
|
||||
mc.nodes[node.Name] = api.ReferencedResourceList{
|
||||
v1.ResourceCPU: utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
|
||||
v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
|
||||
}
|
||||
} else {
|
||||
// get MilliValue to reduce loss of precision
|
||||
mc.nodes[node.Name][v1.ResourceCPU].SetMilli(
|
||||
weightedAverage(mc.nodes[node.Name][v1.ResourceCPU].MilliValue(), metrics.Usage.Cpu().MilliValue()),
|
||||
)
|
||||
mc.nodes[node.Name][v1.ResourceMemory].Set(
|
||||
weightedAverage(mc.nodes[node.Name][v1.ResourceMemory].Value(), metrics.Usage.Memory().Value()),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
mc.hasSynced = true
|
||||
return nil
|
||||
}
|
||||
@@ -1,142 +0,0 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metricscollector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/informers"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
|
||||
t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
|
||||
if usage[v1.ResourceCPU].MilliValue() != millicpu {
|
||||
t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricsCollector(t *testing.T) {
|
||||
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(gvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
t.Logf("Set initial node cpu usage to 1400")
|
||||
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ := collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1400)
|
||||
allnodesUsage, _ := collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
|
||||
|
||||
t.Logf("Set current node cpu usage to 500")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(500, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1310)
|
||||
allnodesUsage, _ = collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1310)
|
||||
|
||||
t.Logf("Set current node cpu usage to 900")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1269)
|
||||
allnodesUsage, _ = collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1269)
|
||||
}
|
||||
|
||||
func TestMetricsCollectorConvergence(t *testing.T) {
|
||||
gvr := schema.GroupVersionResource{Group: "metrics.k8s.io", Version: "v1beta1", Resource: "nodes"}
|
||||
|
||||
n1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
|
||||
n2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
|
||||
n3 := test.BuildTestNode("n3", 2000, 3000, 10, nil)
|
||||
|
||||
n1metrics := test.BuildNodeMetrics("n1", 400, 1714978816)
|
||||
n2metrics := test.BuildNodeMetrics("n2", 1400, 1714978816)
|
||||
n3metrics := test.BuildNodeMetrics("n3", 300, 1714978816)
|
||||
|
||||
clientset := fakeclientset.NewSimpleClientset(n1, n2, n3)
|
||||
metricsClientset := fakemetricsclient.NewSimpleClientset()
|
||||
metricsClientset.Tracker().Create(gvr, n1metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n2metrics, "")
|
||||
metricsClientset.Tracker().Create(gvr, n3metrics, "")
|
||||
|
||||
ctx := context.TODO()
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(clientset, 0)
|
||||
nodeLister := sharedInformerFactory.Core().V1().Nodes().Lister()
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
t.Logf("Set initial node cpu usage to 1400")
|
||||
collector := NewMetricsCollector(nodeLister, metricsClientset, labels.Everything())
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ := collector.NodeUsage(n2)
|
||||
checkCpuNodeUsage(t, nodesUsage, 1400)
|
||||
allnodesUsage, _ := collector.AllNodesUsage()
|
||||
checkCpuNodeUsage(t, allnodesUsage[n2.Name], 1400)
|
||||
|
||||
t.Logf("Set current node cpu/memory usage to 900/1614978816 and wait until it converges to it")
|
||||
n2metrics.Usage[v1.ResourceCPU] = *resource.NewMilliQuantity(900, resource.DecimalSI)
|
||||
n2metrics.Usage[v1.ResourceMemory] = *resource.NewQuantity(1614978816, resource.BinarySI)
|
||||
metricsClientset.Tracker().Update(gvr, n2metrics, "")
|
||||
converged := false
|
||||
for i := 0; i < 300; i++ {
|
||||
collector.Collect(context.TODO())
|
||||
nodesUsage, _ = collector.NodeUsage(n2)
|
||||
if math.Abs(float64(900-nodesUsage[v1.ResourceCPU].MilliValue())) < 6 && math.Abs(float64(1614978816-nodesUsage[v1.ResourceMemory].Value())) < 6 {
|
||||
t.Logf("Node cpu/memory usage converged to 900+-5/1614978816+-5")
|
||||
converged = true
|
||||
break
|
||||
}
|
||||
t.Logf("The current node usage: cpu=%v, memory=%v", nodesUsage[v1.ResourceCPU].MilliValue(), nodesUsage[v1.ResourceMemory].Value())
|
||||
}
|
||||
if !converged {
|
||||
t.Fatalf("The node usage did not converged to 900+-1")
|
||||
}
|
||||
}
|
||||
@@ -18,9 +18,7 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@@ -28,15 +26,11 @@ import (
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
listersv1 "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
const workersCount = 100
|
||||
|
||||
// ReadyNodes returns ready nodes irrespective of whether they are
|
||||
// schedulable or not.
|
||||
func ReadyNodes(ctx context.Context, client clientset.Interface, nodeLister listersv1.NodeLister, nodeSelector string) ([]*v1.Node, error) {
|
||||
@@ -109,97 +103,90 @@ func IsReady(node *v1.Node) bool {
|
||||
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
|
||||
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
|
||||
// deciding if a pod would fit on a node, but more predicates may be added in the future.
|
||||
// There should be no methods to modify nodes or pods in this method.
|
||||
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) error {
|
||||
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
|
||||
// Check node selector and required affinity
|
||||
var errors []error
|
||||
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
|
||||
return err
|
||||
errors = append(errors, err)
|
||||
} else if !ok {
|
||||
return errors.New("pod node selector does not match the node label")
|
||||
errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
|
||||
}
|
||||
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
if !ok {
|
||||
return errors.New("pod does not tolerate taints on the node")
|
||||
errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
|
||||
}
|
||||
|
||||
// Check if the pod can fit on the node based on its resource requests
|
||||
if pod.Spec.NodeName == "" || pod.Spec.NodeName != node.Name {
|
||||
if ok, reqError := fitsRequest(nodeIndexer, pod, node); !ok {
|
||||
return reqError
|
||||
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
|
||||
if !ok {
|
||||
errors = append(errors, reqErrors...)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if node is schedulable
|
||||
if IsNodeUnschedulable(node) {
|
||||
return errors.New("node is not schedulable")
|
||||
errors = append(errors, fmt.Errorf("node is not schedulable"))
|
||||
}
|
||||
|
||||
// Check if pod matches inter-pod anti-affinity rule of pod on node
|
||||
if match, err := podMatchesInterPodAntiAffinity(nodeIndexer, pod, node); err != nil {
|
||||
return err
|
||||
} else if match {
|
||||
return errors.New("pod matches inter-pod anti-affinity rule of other pod on node")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func podFitsNodes(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node, excludeFilter func(pod *v1.Pod, node *v1.Node) bool) bool {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var filteredLen int32
|
||||
checkNode := func(i int) {
|
||||
node := nodes[i]
|
||||
if excludeFilter != nil && excludeFilter(pod, node) {
|
||||
return
|
||||
}
|
||||
err := NodeFit(nodeIndexer, pod, node)
|
||||
if err == nil {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
atomic.AddInt32(&filteredLen, 1)
|
||||
cancel()
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node), "err", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Stop searching for more nodes once a fitting node is found.
|
||||
workqueue.ParallelizeUntil(ctx, workersCount, len(nodes), checkNode)
|
||||
|
||||
return filteredLen > 0
|
||||
return errors
|
||||
}
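// Sketch of the aggregated-error contract (assuming the []error variant of
// NodeFit shown in this hunk): a caller checks fit with
//
//	if errs := NodeFit(nodeIndexer, pod, node); len(errs) == 0 {
//		// pod fits on node
//	}
//
// which is the pattern used by PodFitsAnyNode and PodFitsCurrentNode below;
// with the single-error variant the check is err == nil instead.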
|
||||
|
||||
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
|
||||
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
return podFitsNodes(nodeIndexer, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
|
||||
return pod.Spec.NodeName == node.Name
|
||||
})
|
||||
for _, node := range nodes {
|
||||
// Skip node pod is already on
|
||||
if node.Name == pod.Spec.NodeName {
|
||||
continue
|
||||
}
|
||||
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
|
||||
// to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
return podFitsNodes(nodeIndexer, pod, nodes, nil)
|
||||
for _, node := range nodes {
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
|
||||
// to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
|
||||
err := NodeFit(nodeIndexer, pod, node)
|
||||
if err == nil {
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("Pod does not fit on current node",
|
||||
"pod", klog.KObj(pod), "node", klog.KObj(node), "error", err)
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -211,61 +198,47 @@ func IsNodeUnschedulable(node *v1.Node) bool {
|
||||
|
||||
// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
|
||||
// the pod will fit.
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
|
||||
var insufficientResources []error
|
||||
|
||||
// Get pod requests
|
||||
podRequests, _ := utils.PodRequestsAndLimits(pod)
|
||||
resourceNames := []v1.ResourceName{v1.ResourcePods}
|
||||
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
|
||||
for name := range podRequests {
|
||||
resourceNames = append(resourceNames, name)
|
||||
}
|
||||
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames,
|
||||
func(pod *v1.Pod) (v1.ResourceList, error) {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
return req, nil
|
||||
},
|
||||
)
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, []error{err}
|
||||
}
|
||||
|
||||
podFitsOnNode := true
|
||||
for _, resource := range resourceNames {
|
||||
podResourceRequest := podRequests[resource]
|
||||
availableResource, ok := availableResources[resource]
|
||||
if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
|
||||
return false, fmt.Errorf("insufficient %v", resource)
|
||||
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
|
||||
podFitsOnNode = false
|
||||
}
|
||||
}
|
||||
// check the pod count; at least one free pod slot must be available
|
||||
if quantity, ok := availableResources[v1.ResourcePods]; ok && quantity.MilliValue() <= 0 {
|
||||
return false, fmt.Errorf("insufficient %v", v1.ResourcePods)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
return podFitsOnNode, insufficientResources
|
||||
}
|
||||
|
||||
// nodeAvailableResources returns resources mapped to the quantity available on the node.
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeUtilization, err := NodeUtilization(podsOnNode, resourceNames, podUtilization)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
|
||||
remainingResources := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
|
||||
}
|
||||
remainingResources := api.ReferencedResourceList{}
|
||||
for _, name := range resourceNames {
|
||||
if IsBasicResource(name) {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
remainingResources[name] = resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI)
|
||||
case v1.ResourceMemory:
|
||||
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI)
|
||||
case v1.ResourcePods:
|
||||
remainingResources[name] = resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI)
|
||||
}
|
||||
} else {
|
||||
if !IsBasicResource(name) {
|
||||
if _, exists := node.Status.Allocatable[name]; exists {
|
||||
allocatableResource := node.Status.Allocatable[name]
|
||||
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
|
||||
@@ -279,37 +252,31 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
|
||||
}
|
||||
|
||||
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
|
||||
totalUtilization := api.ReferencedResourceList{}
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
switch name {
|
||||
case v1.ResourceCPU:
|
||||
totalUtilization[name] = resource.NewMilliQuantity(0, resource.DecimalSI)
|
||||
case v1.ResourceMemory:
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.BinarySI)
|
||||
case v1.ResourcePods:
|
||||
totalUtilization[name] = resource.NewQuantity(int64(len(pods)), resource.DecimalSI)
|
||||
default:
|
||||
totalUtilization[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
if !IsBasicResource(name) {
|
||||
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
podUtil, err := podUtilization(pod)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
for _, name := range resourceNames {
|
||||
quantity, ok := podUtil[name]
|
||||
quantity, ok := req[name]
|
||||
if ok && name != v1.ResourcePods {
|
||||
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
|
||||
// the format of the quantity will be updated to the format of y.
|
||||
totalUtilization[name].Add(quantity)
|
||||
totalReqs[name].Add(quantity)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return totalUtilization, nil
|
||||
return totalReqs
|
||||
}
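// Worked example with illustrative numbers (not taken from a specific test):
// for a node whose allocatable CPU is 2000m and whose pods' requests sum to
// 1200m, nodeAvailableResources reports 800m CPU remaining. The
// v1.ResourcePods entry counts pod slots: NodeUtilization seeds it with
// len(pods), so the remainder is allocatable pods minus pods already assigned.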
|
||||
|
||||
// IsBasicResource checks if resource is basic native.
|
||||
@@ -321,77 +288,3 @@ func IsBasicResource(name v1.ResourceName) bool {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// GetNodeWeightGivenPodPreferredAffinity returns the weight
|
||||
// that the pod gives to a node by analyzing the soft node affinity of that pod
|
||||
// (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
|
||||
func GetNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, node *v1.Node) int32 {
|
||||
totalWeight, err := utils.GetNodeWeightGivenPodPreferredAffinity(pod, node)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return totalWeight
|
||||
}
|
||||
|
||||
// GetBestNodeWeightGivenPodPreferredAffinity returns the best weight
|
||||
// (maximum one) that the pod gives to the best node by analyzing the soft node affinity
|
||||
// of that pod (nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution)
|
||||
func GetBestNodeWeightGivenPodPreferredAffinity(pod *v1.Pod, nodes []*v1.Node) int32 {
|
||||
var bestWeight int32 = 0
|
||||
for _, node := range nodes {
|
||||
weight := GetNodeWeightGivenPodPreferredAffinity(pod, node)
|
||||
if weight > bestWeight {
|
||||
bestWeight = weight
|
||||
}
|
||||
}
|
||||
return bestWeight
|
||||
}
|
||||
|
||||
// PodMatchNodeSelector checks if a pod node selector matches the node label.
|
||||
func PodMatchNodeSelector(pod *v1.Pod, node *v1.Node) bool {
|
||||
matches, err := utils.PodMatchNodeSelector(pod, node)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
// podMatchesInterPodAntiAffinity checks if the pod matches the anti-affinity rule
|
||||
// of another pod that is already on the given node.
|
||||
// If a match is found, it returns true.
|
||||
func podMatchesInterPodAntiAffinity(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, error) {
|
||||
if pod.Spec.Affinity == nil || pod.Spec.Affinity.PodAntiAffinity == nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error listing all pods: %v", err)
|
||||
}
|
||||
assignedPodsInNamespace := podutil.GroupByNamespace(podsOnNode)
|
||||
|
||||
for _, term := range utils.GetPodAntiAffinityTerms(pod.Spec.Affinity.PodAntiAffinity) {
|
||||
namespaces := utils.GetNamespacesFromPodAffinityTerm(pod, &term)
|
||||
selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Unable to convert LabelSelector into Selector")
|
||||
return false, err
|
||||
}
|
||||
|
||||
for namespace := range namespaces {
|
||||
for _, assignedPod := range assignedPodsInNamespace[namespace] {
|
||||
if assignedPod.Name == pod.Name || !utils.PodMatchesTermsNamespaceAndSelector(assignedPod, namespaces, selector) {
|
||||
klog.V(4).InfoS("Pod doesn't match inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := node.Labels[term.TopologyKey]; ok {
|
||||
klog.V(1).InfoS("Pod matches inter-pod anti-affinity rule of assigned pod on node", "candidatePod", klog.KObj(pod), "assignedPod", klog.KObj(assignedPod))
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -18,8 +18,6 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -231,7 +229,7 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
nodeTaintValue := "gpu"
|
||||
|
||||
// Staging node has no scheduling restrictions, but the pod always starts here and PodFitsAnyOtherNode() doesn't take into account the node the pod is running on.
|
||||
nodeNames := []string{"node1", "node2", "stagingNode", "node4"}
|
||||
nodeNames := []string{"node1", "node2", "stagingNode"}
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
@@ -717,151 +715,6 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "There are four nodes. One node has a taint, and the other three nodes do not meet the resource requirements, should fail",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 3000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 3000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "There are four nodes. First node has a taint, second node has no label, third node do not meet the resource requirements, just fourth node meets the requirements, should success",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "There are four nodes. First node has a taint, second node has no label, third node do not meet the resource requirements, fourth node is the one where the pod is located, should fail",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[3], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 1000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 8000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -899,230 +752,6 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodFitsNodes(t *testing.T) {
|
||||
nodeNames := []string{"node1", "node2", "node3", "node4"}
|
||||
pod := test.BuildTestPod("p1", 950, 2*1000*1000*1000, nodeNames[0], nil)
|
||||
nodes := []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 1000, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[1], 200, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[2], 300, 8*1000*1000*1000, 12, nil),
|
||||
test.BuildTestNode(nodeNames[3], 400, 8*1000*1000*1000, 12, nil),
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
objs = append(objs, pod)
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
var nodesTraversed sync.Map
|
||||
podFitsNodes(getPodsAssignedToNode, pod, nodes, func(pod *v1.Pod, node *v1.Node) bool {
|
||||
nodesTraversed.Store(node.Name, node)
|
||||
return true
|
||||
})
|
||||
|
||||
for _, node := range nodes {
|
||||
if _, exists := nodesTraversed.Load(node.Name); !exists {
|
||||
t.Errorf("Node %v was not proccesed", node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeFit(t *testing.T) {
|
||||
node := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"region": "main-region",
|
||||
}
|
||||
})
|
||||
|
||||
nodeNolabel := test.BuildTestNode("node", 64000, 128*1000*1000*1000, 2, nil)
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
node *v1.Node
|
||||
podsOnNode []*v1.Pod
|
||||
err error
|
||||
}{
|
||||
{
|
||||
description: "Insufficient cpu",
|
||||
pod: test.BuildTestPod("p1", 10000, 2*1000*1000*1000, "", nil),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.BuildTestPod("p2", 60000, 60*1000*1000*1000, "node", nil),
|
||||
},
|
||||
err: errors.New("insufficient cpu"),
|
||||
},
|
||||
{
|
||||
description: "Insufficient pod num",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, "", nil),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.BuildTestPod("p2", 1000, 2*1000*1000*1000, "node", nil),
|
||||
test.BuildTestPod("p3", 1000, 2*1000*1000*1000, "node", nil),
|
||||
},
|
||||
err: errors.New("insufficient pods"),
|
||||
},
|
||||
{
|
||||
description: "Pod matches inter-pod anti-affinity rule of other pod on node",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
},
|
||||
err: errors.New("pod matches inter-pod anti-affinity rule of other pod on node"),
|
||||
},
|
||||
{
|
||||
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because pod and other pod is not same namespace",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, func(pod *v1.Pod) {
|
||||
pod.Namespace = "test"
|
||||
}), "foo", "bar"),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because other pod not match labels of pod",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo1", "bar1"),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod doesn't match inter-pod anti-affinity rule of other pod on node, because node have no topologyKey",
|
||||
pod: test.PodWithPodAntiAffinity(test.BuildTestPod("p1", 1000, 1000, "node1", nil), "foo", "bar"),
|
||||
node: nodeNolabel,
|
||||
podsOnNode: []*v1.Pod{
|
||||
test.PodWithPodAntiAffinity(test.BuildTestPod("p2", 1000, 1000, node.Name, nil), "foo", "bar"),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "Pod fits on node",
|
||||
pod: test.BuildTestPod("p1", 1000, 1000, "", func(pod *v1.Pod) {}),
|
||||
node: node,
|
||||
podsOnNode: []*v1.Pod{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
objs := []runtime.Object{tc.node, tc.pod}
|
||||
for _, pod := range tc.podsOnNode {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods().Informer()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
err = NodeFit(getPodsAssignedToNode, tc.pod, tc.node)
|
||||
if (err == nil && tc.err != nil) || (err != nil && err.Error() != tc.err.Error()) {
|
||||
t.Errorf("Test %#v failed, got %v, expect %v", tc.description, err, tc.err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBestPodNodeAffinityWeight(t *testing.T) {
|
||||
defaultPod := test.BuildTestPod("p1", 0, 0, "node1", func(p *v1.Pod) {
|
||||
p.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
{
|
||||
Weight: 10,
|
||||
Preference: v1.NodeSelectorTerm{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "key1",
|
||||
Operator: "In",
|
||||
Values: []string{"value1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
tests := []struct {
|
||||
description string
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedWeight int32
|
||||
}{
|
||||
{
|
||||
description: "No node matches the preferred affinity",
|
||||
pod: defaultPod,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key2": "value2",
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key3": "value3",
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedWeight: 0,
|
||||
},
|
||||
{
|
||||
description: "A single node matches the preferred affinity",
|
||||
pod: defaultPod,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key1": "value1",
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node2", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
"key2": "value2",
|
||||
}
|
||||
}),
|
||||
},
|
||||
expectedWeight: 10,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
bestWeight := GetBestNodeWeightGivenPodPreferredAffinity(tc.pod, tc.nodes)
|
||||
if bestWeight != tc.expectedWeight {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// createResourceList builds a small resource list of core resources
|
||||
func createResourceList(cpu, memory, ephemeralStorage int64) v1.ResourceList {
|
||||
resourceList := make(map[v1.ResourceName]resource.Quantity)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.