mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
Compare commits
15 Commits
c4ec31684f
...
chart-0.18
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c6c3be6362 | ||
|
|
d2b8f07876 | ||
|
|
f959101622 | ||
|
|
870a518db4 | ||
|
|
60e41670af | ||
|
|
37c277ca1f | ||
|
|
0777837a13 | ||
|
|
5222378f5a | ||
|
|
104786844a | ||
|
|
b58fcb4d77 | ||
|
|
e40fab430c | ||
|
|
aaaa1c5ba6 | ||
|
|
e7d932ff4e | ||
|
|
e9beffd28d | ||
|
|
6bdfbb5b5f |
6
.gitattributes
vendored
6
.gitattributes
vendored
@@ -1,6 +0,0 @@
|
||||
# Always check-out / check-in files with LF line endings.
|
||||
* text=auto eol=lf
|
||||
|
||||
go.sum merge=union
|
||||
**/zz_generated.*.go linguist-generated=true
|
||||
vendor/** linguist-vendored
|
||||
46
.github/ISSUE_TEMPLATE/bug_report.md
vendored
46
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,46 +0,0 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a bug report to help improve descheduler
|
||||
title: ''
|
||||
labels: 'kind/bug'
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- Please answer these questions before submitting your bug report. Thanks! -->
|
||||
|
||||
**What version of descheduler are you using?**
|
||||
|
||||
descheduler version:
|
||||
|
||||
|
||||
**Does this issue reproduce with the latest release?**
|
||||
|
||||
|
||||
**Which descheduler CLI options are you using?**
|
||||
|
||||
|
||||
**Please provide a copy of your descheduler policy config file**
|
||||
|
||||
|
||||
**What k8s version are you using (`kubectl version`)?**
|
||||
|
||||
<details><summary><code>kubectl version</code> Output</summary><br><pre>
|
||||
$ kubectl version
|
||||
|
||||
</pre></details>
|
||||
|
||||
|
||||
**What did you do?**
|
||||
|
||||
<!--
|
||||
If possible, provide a recipe for reproducing the error.
|
||||
A detailed sequence of steps describing what to do to observe the issue is good.
|
||||
A complete runnable bash shell script is best.
|
||||
-->
|
||||
|
||||
|
||||
**What did you expect to see?**
|
||||
|
||||
|
||||
**What did you see instead?**
|
||||
26
.github/ISSUE_TEMPLATE/feature_request.md
vendored
26
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -1,26 +0,0 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for descheduler
|
||||
title: ''
|
||||
labels: 'kind/feature'
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- Please answer these questions before submitting your feature request. Thanks! -->
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
|
||||
|
||||
**Describe the solution you'd like**
|
||||
<!-- A clear and concise description of what you want to happen. -->
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
|
||||
|
||||
**What version of descheduler are you using?**
|
||||
|
||||
descheduler version:
|
||||
|
||||
**Additional context**
|
||||
<!-- Add any other context or screenshots about the feature request here. -->
|
||||
18
.github/ISSUE_TEMPLATE/misc_request.md
vendored
18
.github/ISSUE_TEMPLATE/misc_request.md
vendored
@@ -1,18 +0,0 @@
|
||||
---
|
||||
name: Miscellaneous
|
||||
about: Not a bug and not a feature
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Please do not use this to submit a bug report or feature request. Use the
|
||||
bug report or feature request options instead.
|
||||
|
||||
Also, please consider posting in the Kubernetes Slack #sig-scheduling channel
|
||||
instead of opening an issue if this is a support request.
|
||||
|
||||
Thanks!
|
||||
-->
|
||||
22
.github/PULL_REQUEST_TEMPLATE.md
vendored
22
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,22 +0,0 @@
|
||||
## Description
|
||||
<!-- Please include a summary of the change and which issue is fixed -->
|
||||
|
||||
## Checklist
|
||||
Please ensure your pull request meets the following criteria before submitting
|
||||
for review, these items will be used by reviewers to assess the quality and
|
||||
completeness of your changes:
|
||||
|
||||
- [ ] **Code Readability**: Is the code easy to understand, well-structured, and consistent with project conventions?
|
||||
- [ ] **Naming Conventions**: Are variable, function, and structs descriptive and consistent?
|
||||
- [ ] **Code Duplication**: Is there any repeated code that should be refactored?
|
||||
- [ ] **Function/Method Size**: Are functions/methods short and focused on a single task?
|
||||
- [ ] **Comments & Documentation**: Are comments clear, useful, and not excessive? Were comments updated where necessary?
|
||||
- [ ] **Error Handling**: Are errors handled appropriately ?
|
||||
- [ ] **Testing**: Are there sufficient unit/integration tests?
|
||||
- [ ] **Performance**: Are there any obvious performance issues or unnecessary computations?
|
||||
- [ ] **Dependencies**: Are new dependencies justified ?
|
||||
- [ ] **Logging & Monitoring**: Is logging used appropriately (not too verbose, not too silent)?
|
||||
- [ ] **Backward Compatibility**: Does this change break any existing functionality or APIs?
|
||||
- [ ] **Resource Management**: Are resources (files, connections, memory) managed and released properly?
|
||||
- [ ] **PR Description**: Is the PR description clear, providing enough context and explaining the motivation for the change?
|
||||
- [ ] **Documentation & Changelog**: Are README and docs updated if necessary?
|
||||
5
.github/ci/ct.yaml
vendored
5
.github/ci/ct.yaml
vendored
@@ -1,5 +0,0 @@
|
||||
chart-dirs:
|
||||
- charts
|
||||
helm-extra-args: "--timeout=5m"
|
||||
check-version-increment: false
|
||||
target-branch: master
|
||||
70
.github/workflows/helm.yaml
vendored
70
.github/workflows/helm.yaml
vendored
@@ -1,70 +0,0 @@
|
||||
name: Helm
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release-*
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/helm.yaml'
|
||||
- '.github/ci/ct.yaml'
|
||||
pull_request:
|
||||
paths:
|
||||
- 'charts/**'
|
||||
- '.github/workflows/helm.yaml'
|
||||
- '.github/ci/ct.yaml'
|
||||
|
||||
jobs:
|
||||
lint-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4.2.0
|
||||
with:
|
||||
version: v3.15.1
|
||||
|
||||
- uses: actions/setup-python@v5.1.1
|
||||
with:
|
||||
python-version: 3.12
|
||||
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.6.1
|
||||
with:
|
||||
version: v3.11.0
|
||||
|
||||
- name: Install Helm Unit Test Plugin
|
||||
run: |
|
||||
helm plugin install https://github.com/helm-unittest/helm-unittest
|
||||
|
||||
- name: Run Helm Unit Tests
|
||||
run: |
|
||||
helm unittest charts/descheduler --strict -d
|
||||
|
||||
- name: Run chart-testing (list-changed)
|
||||
id: list-changed
|
||||
run: |
|
||||
changed=$(ct list-changed --config=.github/ci/ct.yaml)
|
||||
if [[ -n "$changed" ]]; then
|
||||
echo "changed=true" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Run chart-testing (lint)
|
||||
run: ct lint --config=.github/ci/ct.yaml --validate-maintainers=false
|
||||
|
||||
# Need a multi node cluster so descheduler runs until evictions
|
||||
- name: Create multi node Kind cluster
|
||||
run: make kind-multi-node
|
||||
|
||||
# helm-extra-set-args only available after ct 3.6.0
|
||||
- name: Run chart-testing (install)
|
||||
run: ct install --config=.github/ci/ct.yaml --helm-extra-set-args='--set=kind=Deployment'
|
||||
44
.github/workflows/manifests.yaml
vendored
44
.github/workflows/manifests.yaml
vendored
@@ -1,44 +0,0 @@
|
||||
name: manifests
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
strategy:
|
||||
matrix:
|
||||
k8s-version: ["v1.34.0"]
|
||||
descheduler-version: ["v0.34.0"]
|
||||
descheduler-api: ["v1alpha2"]
|
||||
manifest: ["deployment"]
|
||||
kind-version: ["v0.30.0"] # keep in sync with test/run-e2e-tests.sh
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Repo
|
||||
uses: actions/checkout@v4
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@v1.12.0
|
||||
with:
|
||||
node_image: kindest/node:${{ matrix.k8s-version }}
|
||||
kubectl_version: ${{ matrix.k8s-version }}
|
||||
config: test/kind-config.yaml
|
||||
version: ${{ matrix.kind-version }}
|
||||
- uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
cache: true
|
||||
- name: Build image
|
||||
run: |
|
||||
VERSION="dev" make dev-image
|
||||
docker tag descheduler:dev registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }}
|
||||
- name: Kind load image
|
||||
run: |
|
||||
kind load docker-image registry.k8s.io/descheduler/descheduler:${{ matrix.descheduler-version }} --name chart-testing
|
||||
- name: Create k8s manifests
|
||||
run: |
|
||||
kubectl create -f kubernetes/base/rbac.yaml
|
||||
kubectl create -f test/manifests/${{ matrix.descheduler-api }}/configmap.yaml
|
||||
kubectl create -f kubernetes/${{ matrix.manifest }}/${{ matrix.manifest }}.yaml
|
||||
- name: Wait for ready condition
|
||||
run: |
|
||||
kubectl wait --for=condition=Available --timeout=60s ${{ matrix.manifest }} descheduler -n kube-system
|
||||
22
.github/workflows/release.yaml
vendored
22
.github/workflows/release.yaml
vendored
@@ -2,11 +2,8 @@ name: Release Charts
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- release-*
|
||||
|
||||
permissions:
|
||||
contents: write # allow actions to update gh-pages branch
|
||||
tags:
|
||||
- chart-*
|
||||
|
||||
jobs:
|
||||
release:
|
||||
@@ -14,21 +11,20 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "$GITHUB_ACTOR"
|
||||
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
|
||||
|
||||
- name: Install Helm
|
||||
uses: azure/setup-helm@v4.2.0
|
||||
with:
|
||||
version: v3.15.1
|
||||
- name: Fetch history
|
||||
run: git fetch --prune --unshallow
|
||||
|
||||
- name: Add dependency chart repos
|
||||
run: |
|
||||
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
|
||||
|
||||
- name: Run chart-releaser
|
||||
uses: helm/chart-releaser-action@v1.6.0
|
||||
uses: helm/chart-releaser-action@v1.0.0-rc.2
|
||||
env:
|
||||
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
||||
CR_RELEASE_NAME_TEMPLATE: "descheduler-helm-chart-{{ .Version }}"
|
||||
|
||||
47
.github/workflows/security.yaml
vendored
47
.github/workflows/security.yaml
vendored
@@ -1,47 +0,0 @@
|
||||
name: "Security"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
- release-*
|
||||
schedule:
|
||||
- cron: '30 1 * * 0'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
actions: read
|
||||
contents: read
|
||||
security-events: write
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Build image
|
||||
run: |
|
||||
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
|
||||
IMAGE_TAG=${HELM_IMAGE_TAG:-security-test}
|
||||
VERSION=security-test make image
|
||||
- name: Run Trivy vulnerability scanner
|
||||
uses: aquasecurity/trivy-action@master
|
||||
with:
|
||||
image-ref: 'descheduler:security-test'
|
||||
format: 'sarif'
|
||||
exit-code: '0'
|
||||
severity: 'CRITICAL,HIGH'
|
||||
output: 'trivy-results.sarif'
|
||||
|
||||
- name: Upload Trivy scan results to GitHub Security tab
|
||||
uses: github/codeql-action/upload-sarif@v2
|
||||
with:
|
||||
sarif_file: 'trivy-results.sarif'
|
||||
exit-code: '0'
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -3,6 +3,3 @@ _tmp/
|
||||
vendordiff.patch
|
||||
.idea/
|
||||
*.code-workspace
|
||||
.vscode/
|
||||
kind
|
||||
bin/
|
||||
@@ -1,18 +1,15 @@
|
||||
run:
|
||||
timeout: 5m
|
||||
deadline: 2m
|
||||
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- gosimple
|
||||
- gocyclo
|
||||
- misspell
|
||||
- govet
|
||||
|
||||
linters-settings:
|
||||
gofumpt:
|
||||
extra-rules: true
|
||||
goimports:
|
||||
local-prefixes: sigs.k8s.io/descheduler
|
||||
local-prefixes: sigs.k8s.io/descheduler
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# Descheduler Design Constraints
|
||||
|
||||
This is a slowly growing document that lists good practices, conventions, and design decisions.
|
||||
|
||||
## Overview
|
||||
|
||||
TBD
|
||||
|
||||
## Code convention
|
||||
|
||||
* *formatting code*: running `make fmt` before committing each change to avoid ci failing
|
||||
|
||||
## Unit Test Conventions
|
||||
|
||||
These are the known conventions that are useful to practice whenever reasonable:
|
||||
|
||||
* *single pod creation*: each pod variable built using `test.BuildTestPod` is updated only through the `apply` argument of `BuildTestPod`
|
||||
* *single node creation*: each node variable built using `test.BuildTestNode` is updated only through the `apply` argument of `BuildTestNode`
|
||||
* *no object instance sharing*: each object built through `test.BuildXXX` functions is newly created in each unit test to avoid accidental object mutations
|
||||
* *no object instance duplication*: avoid duplication by no creating two objects with the same passed values at two different places. E.g. two nodes created with the same memory, cpu and pods requests. Rather create a single function wrapping test.BuildTestNode and invoke this wrapper multiple times.
|
||||
|
||||
The aim is to reduce cognitive load when reading and debugging the test code.
|
||||
|
||||
## Design Decisions FAQ
|
||||
|
||||
This section documents common questions about design decisions in the descheduler codebase and the rationale behind them.
|
||||
|
||||
### Why doesn't the framework provide helpers for registering and retrieving indexers for plugins?
|
||||
|
||||
In general, each plugin can have many indexers—for example, for nodes, namespaces, pods, and other resources. Each plugin, depending on its internal optimizations, may choose a different indexing function. Indexers are currently used very rarely in the framework and default plugins. Therefore, extending the framework interface with additional helpers for registering and retrieving indexers might introduce an unnecessary and overly restrictive layer without first understanding how indexers will be used. For the moment, I suggest avoiding any restrictions on how many indexers can be registered or which ones can be registered. Instead, we should extend the framework handle to provide a unique ID for each profile, so that indexers within the same profile share a unique prefix. This avoids collisions when the same profile is instantiated more than once. Later, once we learn more about indexer usage, we can revisit whether it makes sense to impose additional restrictions.
|
||||
12
Dockerfile
12
Dockerfile
@@ -11,21 +11,15 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
FROM golang:1.24.6
|
||||
FROM golang:1.13.9
|
||||
|
||||
WORKDIR /go/src/sigs.k8s.io/descheduler
|
||||
COPY . .
|
||||
ARG ARCH
|
||||
ARG VERSION
|
||||
RUN VERSION=${VERSION} make build.$ARCH
|
||||
RUN make
|
||||
|
||||
FROM scratch
|
||||
|
||||
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
|
||||
|
||||
LABEL org.opencontainers.image.source https://github.com/kubernetes-sigs/descheduler
|
||||
|
||||
USER 1000
|
||||
MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
|
||||
|
||||
COPY --from=0 /go/src/sigs.k8s.io/descheduler/_output/bin/descheduler /bin/descheduler
|
||||
|
||||
|
||||
@@ -13,9 +13,7 @@
|
||||
# limitations under the License.
|
||||
FROM scratch
|
||||
|
||||
MAINTAINER Kubernetes SIG Scheduling <sig-scheduling@kubernetes.io>
|
||||
|
||||
USER 1000
|
||||
MAINTAINER Avesh Agarwal <avesh.ncsu@gmail.com>
|
||||
|
||||
COPY _output/bin/descheduler /bin/descheduler
|
||||
|
||||
|
||||
122
Makefile
122
Makefile
@@ -14,25 +14,16 @@
|
||||
|
||||
.PHONY: test
|
||||
|
||||
export CONTAINER_ENGINE ?= docker
|
||||
|
||||
# VERSION is based on a date stamp plus the last commit
|
||||
VERSION?=v$(shell date +%Y%m%d)-$(shell git describe --tags)
|
||||
BRANCH?=$(shell git branch --show-current)
|
||||
SHA1?=$(shell git rev-parse HEAD)
|
||||
# VERSION is currently based on the last commit
|
||||
VERSION?=$(shell git describe --tags)
|
||||
COMMIT=$(shell git rev-parse HEAD)
|
||||
BUILD=$(shell date +%FT%T%z)
|
||||
LDFLAG_LOCATION=sigs.k8s.io/descheduler/pkg/version
|
||||
ARCHS = amd64 arm arm64
|
||||
LDFLAG_LOCATION=sigs.k8s.io/descheduler/cmd/descheduler/app
|
||||
|
||||
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
|
||||
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitCommit=${COMMIT}"
|
||||
|
||||
GOLANGCI_VERSION := v1.64.8
|
||||
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
|
||||
|
||||
GOFUMPT_VERSION := v0.7.0
|
||||
HAS_GOFUMPT := $(shell command -v gofumpt 2> /dev/null)
|
||||
|
||||
GO_VERSION := $(shell (command -v jq > /dev/null && (go mod edit -json | jq -r .Go)) || (sed -En 's/^go (.*)$$/\1/p' go.mod))
|
||||
GOLANGCI_VERSION := v1.15.0
|
||||
HAS_GOLANGCI := $(shell which golangci-lint)
|
||||
|
||||
# REGISTRY is the container registry to push
|
||||
# into. The default is to push to the staging
|
||||
@@ -45,74 +36,35 @@ IMAGE:=descheduler:$(VERSION)
|
||||
# IMAGE_GCLOUD is the image name of descheduler in the remote registry
|
||||
IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
|
||||
|
||||
# CURRENT_DIR is the current dir where the Makefile exists
|
||||
CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||
|
||||
|
||||
# TODO: upload binaries to GCS bucket
|
||||
#
|
||||
# In the future binaries can be uploaded to
|
||||
# GCS bucket gs://k8s-staging-descheduler.
|
||||
|
||||
HAS_HELM := $(shell which helm 2> /dev/null)
|
||||
HAS_HELM := $(shell which helm)
|
||||
|
||||
all: build
|
||||
|
||||
build:
|
||||
CGO_ENABLED=0 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||
|
||||
build.amd64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||
|
||||
build.arm:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||
|
||||
build.arm64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build ${LDFLAGS} -o _output/bin/descheduler sigs.k8s.io/descheduler/cmd/descheduler
|
||||
|
||||
dev-image: build
|
||||
$(CONTAINER_ENGINE) build -f Dockerfile.dev -t $(IMAGE) .
|
||||
docker build -f Dockerfile.dev -t $(IMAGE) .
|
||||
|
||||
image:
|
||||
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE) .
|
||||
docker build -t $(IMAGE) .
|
||||
|
||||
image.amd64:
|
||||
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="amd64" -t $(IMAGE)-amd64 .
|
||||
|
||||
image.arm:
|
||||
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm" -t $(IMAGE)-arm .
|
||||
|
||||
image.arm64:
|
||||
$(CONTAINER_ENGINE) build --build-arg VERSION="$(VERSION)" --build-arg ARCH="arm64" -t $(IMAGE)-arm64 .
|
||||
|
||||
push: image
|
||||
push-container-to-gcloud: image
|
||||
gcloud auth configure-docker
|
||||
$(CONTAINER_ENGINE) tag $(IMAGE) $(IMAGE_GCLOUD)
|
||||
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)
|
||||
docker tag $(IMAGE) $(IMAGE_GCLOUD)
|
||||
docker push $(IMAGE_GCLOUD)
|
||||
|
||||
push-all: image.amd64 image.arm image.arm64
|
||||
gcloud auth configure-docker
|
||||
for arch in $(ARCHS); do \
|
||||
$(CONTAINER_ENGINE) tag $(IMAGE)-$${arch} $(IMAGE_GCLOUD)-$${arch} ;\
|
||||
$(CONTAINER_ENGINE) push $(IMAGE_GCLOUD)-$${arch} ;\
|
||||
done
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest create $(IMAGE_GCLOUD) $(addprefix --amend $(IMAGE_GCLOUD)-, $(ARCHS))
|
||||
for arch in $(ARCHS); do \
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest annotate --arch $${arch} $(IMAGE_GCLOUD) $(IMAGE_GCLOUD)-$${arch} ;\
|
||||
done
|
||||
DOCKER_CLI_EXPERIMENTAL=enabled $(CONTAINER_ENGINE) manifest push $(IMAGE_GCLOUD) ;\
|
||||
push: push-container-to-gcloud
|
||||
|
||||
clean:
|
||||
rm -rf _output
|
||||
rm -rf _tmp
|
||||
|
||||
verify: verify-govet verify-spelling verify-gofmt verify-vendor lint lint-chart verify-gen
|
||||
|
||||
verify-govet:
|
||||
./hack/verify-govet.sh
|
||||
|
||||
verify-spelling:
|
||||
./hack/verify-spelling.sh
|
||||
verify: verify-gofmt verify-vendor lint lint-chart
|
||||
|
||||
verify-gofmt:
|
||||
./hack/verify-gofmt.sh
|
||||
@@ -120,9 +72,6 @@ verify-gofmt:
|
||||
verify-vendor:
|
||||
./hack/verify-vendor.sh
|
||||
|
||||
verify-docs:
|
||||
./hack/verify-docs.sh
|
||||
|
||||
test-unit:
|
||||
./test/run-unit-tests.sh
|
||||
|
||||
@@ -133,47 +82,16 @@ gen:
|
||||
./hack/update-generated-conversions.sh
|
||||
./hack/update-generated-deep-copies.sh
|
||||
./hack/update-generated-defaulters.sh
|
||||
./hack/update-docs.sh
|
||||
|
||||
gen-docker:
|
||||
$(CONTAINER_ENGINE) run --entrypoint make -it -v $(CURRENT_DIR):/go/src/sigs.k8s.io/descheduler -w /go/src/sigs.k8s.io/descheduler golang:$(GO_VERSION) gen
|
||||
|
||||
verify-gen:
|
||||
./hack/verify-conversions.sh
|
||||
./hack/verify-deep-copies.sh
|
||||
./hack/verify-defaulters.sh
|
||||
./hack/verify-docs.sh
|
||||
|
||||
#undo go mod changes caused by above.
|
||||
go mod tidy
|
||||
lint:
|
||||
ifndef HAS_GOLANGCI
|
||||
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
|
||||
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
|
||||
endif
|
||||
./_output/bin/golangci-lint run -v
|
||||
./_output/bin/golangci-lint run
|
||||
|
||||
fmt:
|
||||
ifndef HAS_GOFUMPT
|
||||
go install mvdan.cc/gofumpt@${GOFUMPT_VERSION}
|
||||
endif
|
||||
gofumpt -w -extra .
|
||||
|
||||
# helm
|
||||
|
||||
ensure-helm-install:
|
||||
lint-chart:
|
||||
ifndef HAS_HELM
|
||||
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
|
||||
endif
|
||||
|
||||
lint-chart: ensure-helm-install
|
||||
helm lint ./charts/descheduler
|
||||
|
||||
build-helm:
|
||||
helm package ./charts/descheduler --dependency-update --destination ./bin/chart
|
||||
|
||||
test-helm: ensure-helm-install
|
||||
./test/run-helm-tests.sh
|
||||
|
||||
kind-multi-node:
|
||||
kind create cluster --name kind --config ./hack/kind_config.yaml --wait 2m
|
||||
|
||||
ct-helm:
|
||||
./hack/verify-chart.sh
|
||||
|
||||
25
OWNERS
25
OWNERS
@@ -1,22 +1,11 @@
|
||||
approvers:
|
||||
- damemi
|
||||
- ingvagabund
|
||||
- seanmalloy
|
||||
- a7i
|
||||
- knelasevero
|
||||
- ricardomaraschini
|
||||
reviewers:
|
||||
- damemi
|
||||
- seanmalloy
|
||||
- ingvagabund
|
||||
- lixiang233
|
||||
- a7i
|
||||
- janeliul
|
||||
- knelasevero
|
||||
- jklaw90
|
||||
- googs1025
|
||||
- ricardomaraschini
|
||||
emeritus_approvers:
|
||||
- aveshagarwal
|
||||
- k82cn
|
||||
- ravisantoshgudimetla
|
||||
- damemi
|
||||
reviewers:
|
||||
- aveshagarwal
|
||||
- k82cn
|
||||
- ravisantoshgudimetla
|
||||
- damemi
|
||||
- seanmalloy
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 41 KiB |
@@ -1,7 +1,7 @@
|
||||
apiVersion: v1
|
||||
name: descheduler
|
||||
version: 0.34.0
|
||||
appVersion: 0.34.0
|
||||
version: 0.18.1
|
||||
appVersion: 0.18.0
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||
keywords:
|
||||
- kubernetes
|
||||
@@ -12,5 +12,5 @@ icon: https://kubernetes.io/images/favicon.png
|
||||
sources:
|
||||
- https://github.com/kubernetes-sigs/descheduler
|
||||
maintainers:
|
||||
- name: Kubernetes SIG Scheduling
|
||||
email: sig-scheduling@kubernetes.io
|
||||
- name: stevehipwell
|
||||
email: steve.hipwell@github.com
|
||||
|
||||
@@ -6,12 +6,12 @@
|
||||
|
||||
```shell
|
||||
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
|
||||
helm install my-release --namespace kube-system descheduler/descheduler
|
||||
$ helm install descheduler/descheduler --name my-release
|
||||
```
|
||||
|
||||
## Introduction
|
||||
|
||||
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job with a default DeschedulerPolicy on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. To preview what changes descheduler would make without actually going forward with the changes, you can install descheduler in dry run mode by providing the flag `--set cmdOptions.dry-run=true` to the `helm install` command below.
|
||||
This chart bootstraps a [desheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -22,7 +22,7 @@ This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/desched
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```shell
|
||||
helm install --namespace kube-system my-release descheduler/descheduler
|
||||
helm install --name my-release descheduler/descheduler
|
||||
```
|
||||
|
||||
The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
@@ -43,53 +43,17 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
|
||||
| `kind` | Use as CronJob or Deployment | `CronJob` |
|
||||
| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
|
||||
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
|
||||
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
|
||||
| `imagePullSecrets` | Docker repository secrets | `[]` |
|
||||
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
|
||||
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
|
||||
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
|
||||
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
|
||||
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
|
||||
| `timeZone` | configure `timeZone` for CronJob | `nil` |
|
||||
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
|
||||
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
|
||||
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
|
||||
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
|
||||
| `replicas` | The replica count for Deployment | `1` |
|
||||
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
|
||||
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
|
||||
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
|
||||
| `cronJobAnnotations` | Annotations to add to the descheduler CronJob | `{}` |
|
||||
| `cronJobLabels` | Labels to add to the descheduler CronJob | `{}` |
|
||||
| `jobAnnotations` | Annotations to add to the descheduler Job resources (created by CronJob) | `{}` |
|
||||
| `jobLabels` | Labels to add to the descheduler Job resources (created by CronJob) | `{}` |
|
||||
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
|
||||
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
|
||||
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
| `service.enabled` | If `true`, create a service for deployment | `false` |
|
||||
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
|
||||
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
|
||||
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
|
||||
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
|
||||
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
|
||||
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
|
||||
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
|
||||
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
|
||||
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
|
||||
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
|
||||
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
|
||||
| `commonLabels` | Labels to apply to all resources | `{}` |
|
||||
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
|
||||
| Parameter | Description | Default |
|
||||
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
|
||||
| `image.repository` | Docker repository to use | `us.gcr.io/k8s-artifacts-prod/descheduler/descheduler` |
|
||||
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
|
||||
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
|
||||
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
|
||||
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
|
||||
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
|
||||
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||
|
||||
@@ -1,22 +1 @@
|
||||
Descheduler installed as a {{ .Values.kind }}.
|
||||
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
{{- if eq (.Values.replicas | int) 1 }}
|
||||
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
|
||||
{{- end}}
|
||||
{{- if .Values.leaderElection }}
|
||||
{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
|
||||
WARNING: You enabled DryRun mode, you can't use Leader Election.
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
{{- if .Values.deschedulerPolicy }}
|
||||
A DeschedulerPolicy has been applied for you. You can view the policy with:
|
||||
|
||||
kubectl get configmap -n {{ include "descheduler.namespace" . }} {{ template "descheduler.fullname" . }} -o yaml
|
||||
|
||||
If you wish to define your own policies out of band from this chart, you may define a configmap named {{ template "descheduler.fullname" . }}.
|
||||
To avoid a conflict between helm and your out of band method to deploy the configmap, please set deschedulerPolicy in values.yaml to an empty object as below.
|
||||
|
||||
deschedulerPolicy: {}
|
||||
{{- end }}
|
||||
Descheduler installed as a cron job.
|
||||
|
||||
@@ -24,14 +24,6 @@ If release name contains chart name it will be used as a full name.
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Expand the namespace of the release.
|
||||
Allows overriding it for multi-namespace deployments in combined charts.
|
||||
*/}}
|
||||
{{- define "descheduler.namespace" -}}
|
||||
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
@@ -50,17 +42,6 @@ app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.commonLabels}}
|
||||
{{ toYaml .Values.commonLabels }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "descheduler.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "descheduler.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
@@ -73,32 +54,3 @@ Create the name of the service account to use
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Leader Election
|
||||
*/}}
|
||||
{{- define "descheduler.leaderElection"}}
|
||||
{{- if .Values.leaderElection -}}
|
||||
- --leader-elect={{ .Values.leaderElection.enabled }}
|
||||
{{- if .Values.leaderElection.leaseDuration }}
|
||||
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.renewDeadline }}
|
||||
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.retryPeriod }}
|
||||
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.resourceLock }}
|
||||
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.resourceName }}
|
||||
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
|
||||
{{- end }}
|
||||
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
|
||||
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
|
||||
{{- if $resourceNamespace -}}
|
||||
- --leader-elect-resource-namespace={{ $resourceNamespace }}
|
||||
{{- end -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -6,43 +6,16 @@ metadata:
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups: ["events.k8s.io"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/eviction"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
|
||||
verbs: ["get", "patch", "delete"]
|
||||
{{- end }}
|
||||
{{- if and .Values.deschedulerPolicy }}
|
||||
{{- range .Values.deschedulerPolicy.metricsProviders }}
|
||||
{{- if and (hasKey . "source") (eq .source "KubernetesMetrics") }}
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["pods", "nodes"]
|
||||
verbs: ["get", "list"]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
@@ -12,5 +12,5 @@ roleRef:
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
{{- if .Values.deschedulerPolicy }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
data:
|
||||
policy.yaml: |
|
||||
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
{{ tpl (toYaml .Values.deschedulerPolicy) . | trim | indent 4 }}
|
||||
{{- end }}
|
||||
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
|
||||
|
||||
@@ -1,55 +1,14 @@
|
||||
{{- if eq .Values.kind "CronJob" }}
|
||||
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
|
||||
apiVersion: batch/v1beta1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
{{- if .Values.cronJobAnnotations }}
|
||||
annotations:
|
||||
{{- .Values.cronJobAnnotations | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.cronJobLabels }}
|
||||
{{- .Values.cronJobLabels | toYaml | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
schedule: {{ .Values.schedule | quote }}
|
||||
{{- if .Values.suspend }}
|
||||
suspend: {{ .Values.suspend }}
|
||||
{{- end }}
|
||||
concurrencyPolicy: "Forbid"
|
||||
{{- if .Values.startingDeadlineSeconds }}
|
||||
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
|
||||
{{- end }}
|
||||
{{- if ne .Values.successfulJobsHistoryLimit nil }}
|
||||
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
|
||||
{{- end }}
|
||||
{{- if ne .Values.failedJobsHistoryLimit nil }}
|
||||
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
|
||||
{{- end }}
|
||||
{{- if .Values.timeZone }}
|
||||
timeZone: {{ .Values.timeZone }}
|
||||
{{- end }}
|
||||
jobTemplate:
|
||||
{{- if or .Values.jobAnnotations .Values.jobLabels }}
|
||||
metadata:
|
||||
{{- if .Values.jobAnnotations }}
|
||||
annotations:
|
||||
{{- .Values.jobAnnotations | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.jobLabels }}
|
||||
labels:
|
||||
{{- .Values.jobLabels | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ttlSecondsAfterFinished }}
|
||||
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
|
||||
{{- end }}
|
||||
{{- if .Values.activeDeadlineSeconds }}
|
||||
activeDeadlineSeconds: {{ .Values.activeDeadlineSeconds }}
|
||||
{{- end }}
|
||||
template:
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
@@ -59,83 +18,36 @@ spec:
|
||||
{{- .Values.podAnnotations | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 12 }}
|
||||
app.kubernetes.io/name: {{ include "descheduler.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- .Values.podLabels | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.topologySpreadConstraints }}
|
||||
topologySpreadConstraints:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.dnsConfig }}
|
||||
dnsConfig:
|
||||
{{- .Values.dnsConfig | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
restartPolicy: "Never"
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
{{- toYaml .Values.command | nindent 16 }}
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- --policy-config-file=/policy-dir/policy.yaml
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
{{- range $key, $value := .Values.cmdOptions }}
|
||||
{{- if ne $value nil }}
|
||||
- {{ printf "--%s=%s" $key (toString $value) }}
|
||||
{{- else }}
|
||||
- {{ printf "--%s" $key }}
|
||||
- {{ printf "--%s" $key | quote }}
|
||||
{{- if $value }}
|
||||
- {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 16 }}
|
||||
ports:
|
||||
{{- toYaml .Values.ports | nindent 16 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 16 }}
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 16 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 16 }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumes | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -1,114 +0,0 @@
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.annotations }}
|
||||
annotations: {{- toYaml .Values.deploymentAnnotations | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if gt (.Values.replicas | int) 1 }}
|
||||
{{- if not .Values.leaderElection.enabled }}
|
||||
{{- fail "You must set leaderElection to use more than 1 replica"}}
|
||||
{{- end}}
|
||||
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
|
||||
{{- else }}
|
||||
replicas: 1
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 8 }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- .Values.podLabels | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{- .Values.podAnnotations | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.dnsConfig }}
|
||||
dnsConfig:
|
||||
{{- .Values.dnsConfig | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
{{- if kindIs "bool" .Values.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
{{- toYaml .Values.command | nindent 12 }}
|
||||
args:
|
||||
- --policy-config-file=/policy-dir/policy.yaml
|
||||
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
|
||||
{{- range $key, $value := .Values.cmdOptions }}
|
||||
{{- if ne $value nil }}
|
||||
- {{ printf "--%s=%s" $key (toString $value) }}
|
||||
{{- else }}
|
||||
- {{ printf "--%s" $key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
{{- include "descheduler.leaderElection" . | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
{{- toYaml .Values.ports | nindent 12 }}
|
||||
livenessProbe:
|
||||
{{- toYaml .Values.livenessProbe | nindent 12 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
{{- if and .Values.extraServiceAccountVolumeMounts (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumeMounts | nindent 12 }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityContext }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
{{- if and .Values.extraServiceAccountVolumes (not .Values.automountServiceAccountToken) }}
|
||||
{{ toYaml .Values.extraServiceAccountVolumes | nindent 8}}
|
||||
{{- end }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.topologySpreadConstraints }}
|
||||
topologySpreadConstraints:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,27 +0,0 @@
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
{{- if eq .Values.service.enabled true }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
spec:
|
||||
clusterIP: None
|
||||
{{- if .Values.service.ipFamilyPolicy }}
|
||||
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.ipFamilies }}
|
||||
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: http-metrics
|
||||
port: 10258
|
||||
protocol: TCP
|
||||
targetPort: 10258
|
||||
selector:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 4 }}
|
||||
type: ClusterIP
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,15 +1,8 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
{{- if kindIs "bool" .Values.serviceAccount.automountServiceAccountToken }}
|
||||
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
|
||||
{{- end }}
|
||||
metadata:
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
namespace: {{ include "descheduler.namespace" . }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
{{- if eq .Values.serviceMonitor.enabled true }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}-servicemonitor
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
jobLabel: jobLabel
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ include "descheduler.namespace" . }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||
endpoints:
|
||||
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
|
||||
port: http-metrics
|
||||
{{- if .Values.serviceMonitor.interval }}
|
||||
interval: {{ .Values.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
{{- if .Values.serviceMonitor.serverName }}
|
||||
serverName: {{ .Values.serviceMonitor.serverName }}
|
||||
{{- end}}
|
||||
{{- if .Values.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
1
charts/descheduler/tests/.gitignore
vendored
1
charts/descheduler/tests/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
__snapshot__
|
||||
@@ -1,109 +0,0 @@
|
||||
suite: Test Descheduler CronJob and Job Annotations and Labels
|
||||
|
||||
templates:
|
||||
- "*.yaml"
|
||||
|
||||
release:
|
||||
name: descheduler
|
||||
|
||||
set:
|
||||
kind: CronJob
|
||||
|
||||
tests:
|
||||
- it: adds cronJob and job annotations and labels when set
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
cronJobAnnotations:
|
||||
monitoring.company.com/scrape: "true"
|
||||
description: "test cronjob"
|
||||
cronJobLabels:
|
||||
environment: "test"
|
||||
team: "platform"
|
||||
jobAnnotations:
|
||||
sidecar.istio.io/inject: "false"
|
||||
job.company.com/retry-limit: "3"
|
||||
jobLabels:
|
||||
job-type: "maintenance"
|
||||
priority: "high"
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations["monitoring.company.com/scrape"]
|
||||
value: "true"
|
||||
- equal:
|
||||
path: metadata.annotations.description
|
||||
value: "test cronjob"
|
||||
- equal:
|
||||
path: metadata.labels.environment
|
||||
value: "test"
|
||||
- equal:
|
||||
path: metadata.labels.team
|
||||
value: "platform"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations["sidecar.istio.io/inject"]
|
||||
value: "false"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations["job.company.com/retry-limit"]
|
||||
value: "3"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.job-type
|
||||
value: "maintenance"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.priority
|
||||
value: "high"
|
||||
|
||||
- it: does not add cronJob and job metadata when not set
|
||||
template: templates/cronjob.yaml
|
||||
asserts:
|
||||
- isNull:
|
||||
path: metadata.annotations
|
||||
- isNotNull:
|
||||
path: metadata.labels
|
||||
- equal:
|
||||
path: metadata.labels["app.kubernetes.io/name"]
|
||||
value: descheduler
|
||||
- isNull:
|
||||
path: spec.jobTemplate.metadata
|
||||
|
||||
- it: does not add job metadata when job annotations and labels are empty
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
jobAnnotations: {}
|
||||
jobLabels: {}
|
||||
asserts:
|
||||
- isNull:
|
||||
path: spec.jobTemplate.metadata
|
||||
|
||||
- it: works with all annotation and label types together
|
||||
template: templates/cronjob.yaml
|
||||
set:
|
||||
cronJobAnnotations:
|
||||
cron-annotation: "cron-value"
|
||||
cronJobLabels:
|
||||
cron-label: "cron-value"
|
||||
jobAnnotations:
|
||||
job-annotation: "job-value"
|
||||
jobLabels:
|
||||
job-label: "job-value"
|
||||
podAnnotations:
|
||||
pod-annotation: "pod-value"
|
||||
podLabels:
|
||||
pod-label: "pod-value"
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.annotations.cron-annotation
|
||||
value: "cron-value"
|
||||
- equal:
|
||||
path: metadata.labels.cron-label
|
||||
value: "cron-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.annotations.job-annotation
|
||||
value: "job-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.metadata.labels.job-label
|
||||
value: "job-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.spec.template.metadata.annotations.pod-annotation
|
||||
value: "pod-value"
|
||||
- equal:
|
||||
path: spec.jobTemplate.spec.template.metadata.labels.pod-label
|
||||
value: "pod-value"
|
||||
@@ -1,17 +0,0 @@
suite: Test Descheduler CronJob

templates:
  - "*.yaml"

release:
  name: descheduler

set:
  kind: CronJob

tests:
  - it: creates CronJob when kind is set
    template: templates/cronjob.yaml
    asserts:
      - isKind:
          of: CronJob
@@ -1,49 +0,0 @@
suite: Test Descheduler Deployment

templates:
  - "*.yaml"

release:
  name: descheduler

set:
  kind: Deployment

tests:
  - it: creates Deployment when kind is set
    template: templates/deployment.yaml
    asserts:
      - isKind:
          of: Deployment

  - it: enables leader-election
    set:
      leaderElection:
        enabled: true
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: --leader-elect=true

  - it: support leader-election resourceNamespace
    set:
      leaderElection:
        enabled: true
        resourceNamespace: test
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: --leader-elect-resource-namespace=test

  - it: support legacy leader-election resourceNamescape
    set:
      leaderElection:
        enabled: true
        resourceNamescape: typo
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: --leader-elect-resource-namespace=typo
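The suites above follow the standard helm-unittest layout: `set:` overrides chart values and `asserts:` checks the rendered manifest. As an illustration of how further cases can be written in the same style, the sketch below (not part of this diff) asserts that a custom `deschedulingInterval` shows up in the container arguments; the exact argument string is an assumption about how the deployment template renders that value.

```yaml
suite: Test Descheduler Deployment descheduling interval
templates:
  - "*.yaml"
release:
  name: descheduler
set:
  kind: Deployment
tests:
  - it: passes the configured descheduling interval to the container
    set:
      deschedulingInterval: 10m
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          # assumed rendering of .Values.deschedulingInterval by the deployment template
          content: --descheduling-interval=10m
```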
@@ -2,189 +2,51 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# CronJob or Deployment
kind: CronJob

image:
  repository: registry.k8s.io/descheduler/descheduler
  repository: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler
  # Overrides the image tag whose default is the chart version
  tag: ""
  pullPolicy: IfNotPresent

imagePullSecrets:
# - name: container-registry-secret

resources:
  requests:
    cpu: 500m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 256Mi

ports:
  - containerPort: 10258
    protocol: TCP

securityContext:
  allowPrivilegeEscalation: false
  capabilities:
    drop:
      - ALL
  privileged: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000

nameOverride: ""
fullnameOverride: ""

# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""

# labels that'll be applied to all resources
commonLabels: {}

cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished: 600
# activeDeadlineSeconds: 60 # Make sure this value is SHORTER than the cron interval.
# timeZone: Etc/UTC

# Required when running as a Deployment
deschedulingInterval: 5m

# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1

# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election can't be activated if DryRun enabled
leaderElection: {}
#  enabled: true
#  leaseDuration: 15s
#  renewDeadline: 10s
#  retryPeriod: 2s
#  resourceLock: "leases"
#  resourceName: "descheduler"
#  resourceNamespace: "kube-system"

command:
  - "/bin/descheduler"

cmdOptions:
  v: 3
#  evict-local-storage-pods:
#  max-pods-to-evict-per-node: 10
#  node-selector: "key1=value1,key2=value2"

# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

# deschedulerPolicy contains the policies the descheduler will execute.
deschedulerPolicy:
  # nodeSelector: "key1=value1,key2=value2"
  # maxNoOfPodsToEvictPerNode: 10
  # maxNoOfPodsToEvictPerNamespace: 10
  # metricsProviders:
  # - source: KubernetesMetrics
  # tracing:
  #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
  #   transportCert: ""
  #   serviceName: ""
  #   serviceNamespace: ""
  #   sampleRate: 1.0
  #   fallbackToNoOpProviderOnError: true
  profiles:
    - name: default
      pluginConfig:
        - name: DefaultEvictor
          args:
            podProtections:
              defaultDisabled:
                - "PodsWithLocalStorage"
              extraEnabled:
                - "PodsWithPVC"
        - name: RemoveDuplicates
        - name: RemovePodsHavingTooManyRestarts
          args:
            podRestartThreshold: 100
            includingInitContainers: true
        - name: RemovePodsViolatingNodeAffinity
          args:
            nodeAffinityType:
              - requiredDuringSchedulingIgnoredDuringExecution
        - name: RemovePodsViolatingNodeTaints
        - name: RemovePodsViolatingInterPodAntiAffinity
        - name: RemovePodsViolatingTopologySpreadConstraint
        - name: LowNodeUtilization
          args:
            thresholds:
              cpu: 20
              memory: 20
              pods: 20
            targetThresholds:
              cpu: 50
              memory: 50
              pods: 50
      plugins:
        balance:
          enabled:
            - RemoveDuplicates
            - RemovePodsViolatingTopologySpreadConstraint
            - LowNodeUtilization
        deschedule:
          enabled:
            - RemovePodsHavingTooManyRestarts
            - RemovePodsViolatingNodeTaints
            - RemovePodsViolatingNodeAffinity
            - RemovePodsViolatingInterPodAntiAffinity
  strategies:
    RemoveDuplicates:
      enabled: true
    RemovePodsViolatingNodeTaints:
      enabled: true
    RemovePodsViolatingNodeAffinity:
      enabled: true
      params:
        nodeAffinityType:
          - requiredDuringSchedulingIgnoredDuringExecution
    RemovePodsViolatingInterPodAntiAffinity:
      enabled: true
    LowNodeUtilization:
      enabled: true
      params:
        nodeResourceUtilizationThresholds:
          thresholds:
            cpu: 20
            memory: 20
            pods: 20
          targetThresholds:
            cpu: 50
            memory: 50
            pods: 50

priorityClassName: system-cluster-critical

nodeSelector: {}
# foo: bar

affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: kubernetes.io/e2e-az-name
#         operator: In
#         values:
#         - e2e-az1
#         - e2e-az2
#  podAntiAffinity:
#    requiredDuringSchedulingIgnoredDuringExecution:
#      - labelSelector:
#          matchExpressions:
#            - key: app.kubernetes.io/name
#              operator: In
#              values:
#                - descheduler
#          topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
#   topologyKey: kubernetes.io/hostname
#   whenUnsatisfiable: DoNotSchedule
#   labelSelector:
#     matchLabels:
#       app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
#   operator: 'Equal'
#   value: 'tool'
#   effect: 'NoSchedule'

rbac:
  # Specifies whether RBAC resources should be created
  create: true
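Tying the values above together: running the chart as a long-lived Deployment instead of a CronJob means setting `kind`, a `deschedulingInterval`, and, for more than one replica, `leaderElection`. The following is a minimal sketch of a user-supplied values override, using only keys that appear in the file above; the concrete durations are illustrative.

```yaml
# values-deployment.yaml -- hypothetical override file passed via `helm install -f`
kind: Deployment
replicas: 2
deschedulingInterval: 5m
leaderElection:
  enabled: true
  leaseDuration: 15s
  renewDeadline: 10s
  retryPeriod: 2s
  resourceLock: "leases"
  resourceName: "descheduler"
  resourceNamespace: "kube-system"
```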
@@ -195,103 +57,3 @@ serviceAccount:
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Specifies custom annotations for the serviceAccount
  annotations: {}
  # Opt out of API credential automounting
  #
  # automountServiceAccountToken Default is not set
  # automountServiceAccountToken: true

# Mount the ServiceAccountToken in the Pod of a CronJob or Deployment
# Default is not set - but only implied by the ServiceAccount
# automountServiceAccountToken: true

# Annotations that'll be applied to deployment
deploymentAnnotations: {}

cronJobAnnotations: {}

cronJobLabels: {}

jobAnnotations: {}

jobLabels: {}

podAnnotations: {}

podLabels: {}

dnsConfig: {}

livenessProbe:
  failureThreshold: 3
  httpGet:
    path: /healthz
    port: 10258
    scheme: HTTPS
  initialDelaySeconds: 5
  periodSeconds: 20
  timeoutSeconds: 5

service:
  enabled: false
  # @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
  #
  ipFamilyPolicy: ""
  # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
  # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
  # E.g.
  # ipFamilies:
  #   - IPv6
  #   - IPv4
  ipFamilies: []

serviceMonitor:
  enabled: false
  # The namespace where Prometheus expects to find service monitors.
  # namespace: ""
  # Add custom labels to the ServiceMonitor resource
  additionalLabels: {}
    # prometheus: kube-prometheus-stack
  interval: ""
  # honorLabels: true
  insecureSkipVerify: true
  serverName: null
  metricRelabelings: []
    # - action: keep
    #   regex: 'descheduler_(build_info|pods_evicted)'
    #   sourceLabels: [__name__]
  relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

## Additional Volume mounts when automountServiceAccountToken is false
# extraServiceAccountVolumeMounts:
# - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
#   name: kube-api-access
#   readOnly: true

## Additional Volumes when automountServiceAccountToken is false
# extraServiceAccountVolumes:
# - name: kube-api-access
#   projected:
#     defaultMode: 0444
#     sources:
#     - configMap:
#         items:
#         - key: ca.crt
#           path: ca.crt
#         name: kube-root-ca.crt
#     - downwardAPI:
#         items:
#         - fieldRef:
#             apiVersion: v1
#             fieldPath: metadata.namespace
#           path: namespace
#     - serviceAccountToken:
#         expirationSeconds: 3600
#         path: token
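Similarly, the `service` and `serviceMonitor` blocks above are disabled by default. A minimal sketch for scraping the descheduler with a Prometheus Operator stack might look like the following; the `additionalLabels` value is an assumption that depends on how your Prometheus instance selects ServiceMonitors.

```yaml
# hypothetical monitoring override, using only keys shown above
service:
  enabled: true
serviceMonitor:
  enabled: true
  additionalLabels:
    prometheus: kube-prometheus-stack
  interval: 30s
  insecureSkipVerify: true
```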
@@ -1,20 +1,20 @@
# See https://cloud.google.com/cloud-build/docs/build-config

# this must be specified in seconds. If omitted, defaults to 600s (10 mins)
timeout: 3600s
timeout: 1200s
# this prevents errors if you don't use both _GIT_TAG and _PULL_BASE_REF,
# or any new substitutions added in the future.
options:
  substitution_option: ALLOW_LOOSE
steps:
  - name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
  - name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
    entrypoint: make
    env:
      - DOCKER_CLI_EXPERIMENTAL=enabled
      - VERSION=$_GIT_TAG
      - BASE_REF=$_PULL_BASE_REF
    args:
      - push-all
      - push
substitutions:
  # _GIT_TAG will be filled with a git-based tag for the image, of the form vYYYYMMDD-hash, and
  # can be used as a substitution
@@ -18,136 +18,44 @@ limitations under the License.
package options

import (
    "strings"
    "time"

    promapi "github.com/prometheus/client_golang/api"
    "github.com/spf13/pflag"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    apiserver "k8s.io/apiserver/pkg/server"
    apiserveroptions "k8s.io/apiserver/pkg/server/options"
    clientset "k8s.io/client-go/kubernetes"

    restclient "k8s.io/client-go/rest"
    cliflag "k8s.io/component-base/cli/flag"
    componentbaseconfig "k8s.io/component-base/config"
    componentbaseoptions "k8s.io/component-base/config/options"
    "k8s.io/component-base/featuregate"
    "k8s.io/klog/v2"
    metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"

    // install the componentconfig api so we get its defaulting and conversion functions
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig"
    "sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
    deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
    "sigs.k8s.io/descheduler/pkg/features"
    "sigs.k8s.io/descheduler/pkg/tracing"
)

const (
    DefaultDeschedulerPort = 10258
    "github.com/spf13/pflag"
)

// DeschedulerServer configuration
type DeschedulerServer struct {
    componentconfig.DeschedulerConfiguration

    Client clientset.Interface
    EventClient clientset.Interface
    MetricsClient metricsclient.Interface
    PrometheusClient promapi.Client
    SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
    SecureServingInfo *apiserver.SecureServingInfo
    DisableMetrics bool
    EnableHTTP2 bool
    // FeatureGates enabled by the user
    FeatureGates map[string]bool
    // DefaultFeatureGates for internal accessing so unit tests can enable/disable specific features
    DefaultFeatureGates featuregate.FeatureGate
    Client clientset.Interface
}

// NewDeschedulerServer creates a new DeschedulerServer with default parameters
func NewDeschedulerServer() (*DeschedulerServer, error) {
    cfg, err := newDefaultComponentConfig()
    if err != nil {
        return nil, err
func NewDeschedulerServer() *DeschedulerServer {
    versioned := v1alpha1.DeschedulerConfiguration{}
    deschedulerscheme.Scheme.Default(&versioned)
    cfg := componentconfig.DeschedulerConfiguration{}
    deschedulerscheme.Scheme.Convert(versioned, &cfg, nil)
    s := DeschedulerServer{
        DeschedulerConfiguration: cfg,
    }

    secureServing := apiserveroptions.NewSecureServingOptions().WithLoopback()
    secureServing.BindPort = DefaultDeschedulerPort

    return &DeschedulerServer{
        DeschedulerConfiguration: *cfg,
        SecureServing: secureServing,
    }, nil
}

func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
    versionedCfg := v1alpha1.DeschedulerConfiguration{
        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
            LeaderElect: false,
            LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
            RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
            RetryPeriod: metav1.Duration{Duration: 26 * time.Second},
            ResourceLock: "leases",
            ResourceName: "descheduler",
            ResourceNamespace: "kube-system",
        },
    }
    deschedulerscheme.Scheme.Default(&versionedCfg)
    cfg := componentconfig.DeschedulerConfiguration{
        Tracing: componentconfig.TracingConfiguration{},
    }
    if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
        return nil, err
    }
    return &cfg, nil
    return &s
}

// AddFlags adds flags for a specific SchedulerServer to the specified FlagSet
func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
    fs.DurationVar(&rs.DeschedulingInterval, "descheduling-interval", rs.DeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.")
    fs.StringVar(&rs.ClientConnection.Kubeconfig, "kubeconfig", rs.ClientConnection.Kubeconfig, "File with kube configuration. Deprecated, use client-connection-kubeconfig instead.")
    fs.StringVar(&rs.ClientConnection.Kubeconfig, "client-connection-kubeconfig", rs.ClientConnection.Kubeconfig, "File path to kube configuration for interacting with kubernetes apiserver.")
    fs.Float32Var(&rs.ClientConnection.QPS, "client-connection-qps", rs.ClientConnection.QPS, "QPS to use for interacting with kubernetes apiserver.")
    fs.Int32Var(&rs.ClientConnection.Burst, "client-connection-burst", rs.ClientConnection.Burst, "Burst to use for interacting with kubernetes apiserver.")
    fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
    fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
    fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "Execute descheduler in dry run mode.")
    fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
    fs.StringVar(&rs.Tracing.CollectorEndpoint, "otel-collector-endpoint", "", "Set this flag to the OpenTelemetry Collector Service Address")
    fs.StringVar(&rs.Tracing.TransportCert, "otel-transport-ca-cert", "", "Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode")
    fs.StringVar(&rs.Tracing.ServiceName, "otel-service-name", tracing.DefaultServiceName, "OTEL Trace name to be used with the resources")
    fs.StringVar(&rs.Tracing.ServiceNamespace, "otel-trace-namespace", "", "OTEL Trace namespace to be used with the resources")
    fs.Float64Var(&rs.Tracing.SampleRate, "otel-sample-rate", 1.0, "Sample rate to collect the Traces")
    fs.BoolVar(&rs.Tracing.FallbackToNoOpProviderOnError, "otel-fallback-no-op-on-error", false, "Fallback to NoOp Tracer in case of error")
    fs.BoolVar(&rs.EnableHTTP2, "enable-http2", false, "If http/2 should be enabled for the metrics and health check")
    fs.Var(cliflag.NewMapStringBool(&rs.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
        "Options are:\n"+strings.Join(features.DefaultMutableFeatureGate.KnownFeatures(), "\n"))

    componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)

    rs.SecureServing.AddFlags(fs)
}

func (rs *DeschedulerServer) Apply() error {
    err := features.DefaultMutableFeatureGate.SetFromMap(rs.FeatureGates)
    if err != nil {
        return err
    }
    rs.DefaultFeatureGates = features.DefaultMutableFeatureGate

    // loopbackClientConfig is a config for a privileged loopback connection
    var loopbackClientConfig *restclient.Config
    var secureServing *apiserver.SecureServingInfo
    if err := rs.SecureServing.ApplyTo(&secureServing, &loopbackClientConfig); err != nil {
        klog.ErrorS(err, "failed to apply secure server configuration")
        return err
    }

    if secureServing != nil {
        secureServing.DisableHTTP2 = !rs.EnableHTTP2
        rs.SecureServingInfo = secureServing
    }

    return nil
    fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
    // node-selector query causes descheduler to run only on nodes that matches the node labels in the query
    fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
    // max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
    fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "Limits the maximum number of pods to be evicted per node by descheduler")
    // evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
    fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "Enables evicting pods using local storage by descheduler")
}
@@ -18,122 +18,44 @@ limitations under the License.
package app

import (
    "context"
    "flag"
    "io"
    "os/signal"
    "syscall"
    "time"

    "github.com/spf13/cobra"

    "sigs.k8s.io/descheduler/cmd/descheduler/app/options"
    "sigs.k8s.io/descheduler/pkg/descheduler"
    "sigs.k8s.io/descheduler/pkg/tracing"

    "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/apiserver/pkg/server/healthz"
    "k8s.io/apiserver/pkg/server/mux"
    "k8s.io/component-base/featuregate"
    "github.com/spf13/cobra"

    aflag "k8s.io/component-base/cli/flag"
    "k8s.io/component-base/logs"
    logsapi "k8s.io/component-base/logs/api/v1"
    _ "k8s.io/component-base/logs/json/register"
    "k8s.io/component-base/metrics/legacyregistry"
    "k8s.io/klog/v2"
    "k8s.io/klog"
)

// NewDeschedulerCommand creates a *cobra.Command object with default parameters
func NewDeschedulerCommand(out io.Writer) *cobra.Command {
    s, err := options.NewDeschedulerServer()
    if err != nil {
        klog.ErrorS(err, "unable to initialize server")
    }

    featureGate := featuregate.NewFeatureGate()
    logConfig := logsapi.NewLoggingConfiguration()

    s := options.NewDeschedulerServer()
    cmd := &cobra.Command{
        Use: "descheduler",
        Short: "descheduler",
        Long: "The descheduler evicts pods which may be bound to less desired nodes",
        PreRunE: func(cmd *cobra.Command, args []string) error {
        Long: `The descheduler evicts pods which may be bound to less desired nodes`,
        Run: func(cmd *cobra.Command, args []string) {
            logs.InitLogs()
            if logsapi.ValidateAndApply(logConfig, featureGate); err != nil {
                return err
            defer logs.FlushLogs()
            err := Run(s)
            if err != nil {
                klog.Errorf("%v", err)
            }
            descheduler.SetupPlugins()
            return nil
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            if err = s.Apply(); err != nil {
                klog.ErrorS(err, "failed to apply")
                return err
            }

            if err = Run(cmd.Context(), s); err != nil {
                klog.ErrorS(err, "failed to run descheduler server")
                return err
            }

            return nil
        },
    }
    cmd.SetOut(out)
    cmd.SetOutput(out)

    flags := cmd.Flags()
    flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
    flags.AddGoFlagSet(flag.CommandLine)
    s.AddFlags(flags)

    runtime.Must(logsapi.AddFeatureGates(featureGate))
    logsapi.AddFlags(logConfig, flags)

    return cmd
}

func Run(rootCtx context.Context, rs *options.DeschedulerServer) error {
    ctx, done := signal.NotifyContext(rootCtx, syscall.SIGINT, syscall.SIGTERM)

    pathRecorderMux := mux.NewPathRecorderMux("descheduler")
    if !rs.DisableMetrics {
        pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
    }

    healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))

    var stoppedCh <-chan struct{}
    var err error
    if rs.SecureServingInfo != nil {
        stoppedCh, _, err = rs.SecureServingInfo.Serve(pathRecorderMux, 0, ctx.Done())
        if err != nil {
            klog.Fatalf("failed to start secure server: %v", err)
            return err
        }
    }

    err = tracing.NewTracerProvider(ctx, rs.Tracing.CollectorEndpoint, rs.Tracing.TransportCert, rs.Tracing.ServiceName, rs.Tracing.ServiceNamespace, rs.Tracing.SampleRate, rs.Tracing.FallbackToNoOpProviderOnError)
    if err != nil {
        klog.ErrorS(err, "failed to create tracer provider")
    }
    defer func() {
        // we give the tracing.Shutdown() its own context as the
        // original context may have been cancelled already. we
        // have arbitrarily chosen the timeout duration.
        ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
        defer cancel()
        tracing.Shutdown(ctx)
    }()

    // increase the fake watch channel so the dry-run mode can be run
    // over a cluster with thousands of pods
    watch.DefaultChanSize = 100000
    err = descheduler.Run(ctx, rs)
    if err != nil {
        return err
    }

    done()
    if stoppedCh != nil {
        // wait for metrics server to close
        <-stoppedCh
    }

    return nil
func Run(rs *options.DeschedulerServer) error {
    return descheduler.Run(rs)
}
@@ -18,19 +18,70 @@ package app

import (
    "fmt"
    "runtime"
    "strings"

    "github.com/spf13/cobra"
    "sigs.k8s.io/descheduler/pkg/version"
)

var (
    // gitCommit is a constant representing the source version that
    // generated this build. It should be set during build via -ldflags.
    gitCommit string
    // version is a constant representing the version tag that
    // generated this build. It should be set during build via -ldflags.
    version string
    // buildDate in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
    // It should be set during build via -ldflags.
    buildDate string
)

// Info holds the information related to descheduler app version.
type Info struct {
    Major string `json:"major"`
    Minor string `json:"minor"`
    GitCommit string `json:"gitCommit"`
    GitVersion string `json:"gitVersion"`
    BuildDate string `json:"buildDate"`
    GoVersion string `json:"goVersion"`
    Compiler string `json:"compiler"`
    Platform string `json:"platform"`
}

// Get returns the overall codebase version. It's for detecting
// what code a binary was built from.
func Get() Info {
    majorVersion, minorVersion := splitVersion(version)
    return Info{
        Major: majorVersion,
        Minor: minorVersion,
        GitCommit: gitCommit,
        GitVersion: version,
        BuildDate: buildDate,
        GoVersion: runtime.Version(),
        Compiler: runtime.Compiler,
        Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
    }
}

func NewVersionCommand() *cobra.Command {
    versionCmd := &cobra.Command{
    var versionCmd = &cobra.Command{
        Use: "version",
        Short: "Version of descheduler",
        Long: `Prints the version of descheduler.`,
        Run: func(cmd *cobra.Command, args []string) {
            fmt.Printf("Descheduler version %+v\n", version.Get())
            fmt.Printf("Descheduler version %+v\n", Get())
        },
    }
    return versionCmd
}

// splitVersion splits the git version to generate major and minor versions needed.
func splitVersion(version string) (string, string) {
    if version == "" {
        return "", ""
    }
    // A sample version would be of form v0.1.0-7-ge884046, so split at first '.' and
    // then return 0 and 1+(+ appended to follow semver convention) for major and minor versions.
    return strings.Trim(strings.Split(version, ".")[0], "v"), strings.Split(version, ".")[1] + "+"
}
@@ -17,9 +17,10 @@ limitations under the License.
package main

import (
    "flag"
    "fmt"
    "os"

    "k8s.io/component-base/cli"
    "sigs.k8s.io/descheduler/cmd/descheduler/app"
)

@@ -27,7 +28,9 @@ func main() {
    out := os.Stdout
    cmd := app.NewDeschedulerCommand(out)
    cmd.AddCommand(app.NewVersionCommand())

    code := cli.Run(cmd)
    os.Exit(code)
    flag.CommandLine.Parse([]string{})
    if err := cmd.Execute(); err != nil {
        fmt.Println(err)
        os.Exit(1)
    }
}
@@ -1,70 +0,0 @@
## descheduler

descheduler

### Synopsis

The descheduler evicts pods which may be bound to less desired nodes

```
descheduler [flags]
```

### Options

```
      --bind-address ip                          The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces and IP address families will be used. (default 0.0.0.0)
      --cert-dir string                          The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
      --client-connection-burst int32            Burst to use for interacting with kubernetes apiserver.
      --client-connection-kubeconfig string      File path to kube configuration for interacting with kubernetes apiserver.
      --client-connection-qps float32            QPS to use for interacting with kubernetes apiserver.
      --descheduling-interval duration           Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
      --disable-http2-serving                    If true, HTTP2 serving will be disabled [default=false]
      --disable-metrics                          Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
      --dry-run                                  Execute descheduler in dry run mode.
      --enable-http2                             If http/2 should be enabled for the metrics and health check
      --feature-gates mapStringBool              A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
                                                 AllAlpha=true|false (ALPHA - default=false)
                                                 AllBeta=true|false (BETA - default=false)
                                                 EvictionsInBackground=true|false (ALPHA - default=false)
  -h, --help                                     help for descheduler
      --http2-max-streams-per-connection int     The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
      --kubeconfig string                        File with kube configuration. Deprecated, use client-connection-kubeconfig instead.
      --leader-elect                             Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
      --leader-elect-lease-duration duration     The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 2m17s)
      --leader-elect-renew-deadline duration     The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than the lease duration. This is only applicable if leader election is enabled. (default 1m47s)
      --leader-elect-resource-lock string        The type of resource object that is used for locking during leader election. Supported options are 'leases'. (default "leases")
      --leader-elect-resource-name string        The name of resource object that is used for locking during leader election. (default "descheduler")
      --leader-elect-resource-namespace string   The namespace of resource object that is used for locking during leader election. (default "kube-system")
      --leader-elect-retry-period duration       The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 26s)
      --log-flush-frequency duration             Maximum number of seconds between log flushes (default 5s)
      --log-json-info-buffer-size quantity       [Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
      --log-json-split-stream                    [Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
      --log-text-info-buffer-size quantity       [Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.
      --log-text-split-stream                    [Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.
      --logging-format string                    Sets the log format. Permitted formats: "json" (gated by LoggingBetaOptions), "text". (default "text")
      --otel-collector-endpoint string           Set this flag to the OpenTelemetry Collector Service Address
      --otel-fallback-no-op-on-error             Fallback to NoOp Tracer in case of error
      --otel-sample-rate float                   Sample rate to collect the Traces (default 1)
      --otel-service-name string                 OTEL Trace name to be used with the resources (default "descheduler")
      --otel-trace-namespace string              OTEL Trace namespace to be used with the resources
      --otel-transport-ca-cert string            Path of the CA Cert that can be used to generate the client Certificate for establishing secure connection to the OTEL in gRPC mode
      --permit-address-sharing                   If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
      --permit-port-sharing                      If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
      --policy-config-file string                File with descheduler policy configuration.
      --secure-port int                          The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
      --tls-cert-file string                     File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
      --tls-cipher-suites strings                Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
                                                 Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256.
                                                 Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384, TLS_RSA_WITH_RC4_128_SHA.
      --tls-min-version string                   Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
      --tls-private-key-file string              File containing the default x509 private key matching --tls-cert-file.
      --tls-sni-cert-key namedCertKey            A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
  -v, --v Level                                  number for the log level verbosity
      --vmodule pattern=N,...                    comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)
```

### SEE ALSO

* [descheduler version](descheduler_version.md) - Version of descheduler
@@ -1,22 +0,0 @@
## descheduler version

Version of descheduler

### Synopsis

Prints the version of descheduler.

```
descheduler version [flags]
```

### Options

```
  -h, --help   help for version
```

### SEE ALSO

* [descheduler](descheduler.md) - descheduler
@@ -3,10 +3,10 @@
## Required Tools

- [Git](https://git-scm.com/downloads)
- [Go 1.23+](https://golang.org/dl/)
- [Go 1.13+](https://golang.org/dl/)
- [Docker](https://docs.docker.com/install/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl)
- [kind v0.10.0+](https://kind.sigs.k8s.io/)
- [kind](https://kind.sigs.k8s.io/)

## Build and Run

@@ -20,7 +20,7 @@ make

Run descheduler.
```sh
./_output/bin/descheduler --client-connection-kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
./_output/bin/descheduler --kubeconfig <path to kubeconfig> --policy-config-file <path-to-policy-file> --v 1
```

View all CLI options.
@@ -31,47 +31,12 @@ View all CLI options.
## Run Tests
```
GOOS=linux make dev-image
make kind-multi-node
kind create cluster --config hack/kind_config.yaml
kind load docker-image <image name>
kind get kubeconfig > /tmp/admin.conf
export KUBECONFIG=/tmp/admin.conf
make test-unit
make test-e2e
```

## Format Code

After making changes in the code base, ensure that the code is formatted correctly:

```
make fmt
```

## Build Helm Package locally

If you made some changes in the chart, and just want to check if templating is ok, or if the chart is buildable, you can run this command to have a package built from the `./charts` directory.

```
make build-helm
```

## Lint Helm Chart locally

To check linting of your changes in the helm chart locally you can run:

```
make lint-chart
```

## Test helm changes locally with kind and ct

You will need kind and docker (or equivalent) installed. We can use ct public image to avoid installing ct and all its dependencies.

```
make kind-multi-node
make ct-helm
```

### Miscellaneous
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.
See the [hack directory](https://github.com/kubernetes-sigs/descheduler/tree/master/hack) for additional tools and scripts used for developing the descheduler.
@@ -1,16 +0,0 @@
# Proposals
This document walks you through all the enhancement proposals for the descheduler.

## Descheduler v1alpha2 Design Proposal
```yaml
title: Descheduler v1alpha2 Design Proposal
authors:
  - "@damemi"
link:
  - https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
  - https://github.com/kubernetes-sigs/descheduler/issues/679
owning-sig: sig-scheduling
creation-date: 2021-05-01
status: implementable
```
@@ -1,88 +1,34 @@
# Release Guide

The process for publishing each Descheduler release includes a mixture of manual and automatic steps. Over
time, it would be good to automate as much of this process as possible. However, due to current limitations,
care must be taken to perform each manual step precisely so that the automated steps execute properly.
## Container Image

## Pre-release Code Changes
### Semi-automatic

Before publishing each release, the following code updates must be made:
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

- [ ] (Optional, but recommended) Bump `k8s.io` dependencies to the `-rc` tags. These tags are usually published around upstream code freeze. [Example](https://github.com/kubernetes-sigs/descheduler/pull/539)
- [ ] Bump `k8s.io` dependencies to GA tags once they are published (following the upstream release). [Example](https://github.com/kubernetes-sigs/descheduler/pull/615)
- [ ] Ensure that Go is updated to the same version as upstream. [Example](https://github.com/kubernetes-sigs/descheduler/pull/801)
- [ ] Make CI changes in [github.com/kubernetes/test-infra](https://github.com/kubernetes/test-infra) to add the new version's tests (note, this may also include a Go bump). [Example](https://github.com/kubernetes/test-infra/pull/25833)
- [ ] Update local CI versions for utils (such as golang-ci), kind, and go. [Example - e2e](https://github.com/kubernetes-sigs/descheduler/commit/ac4d576df8831c0c399ee8fff1e85469e90b8c44), [Example - helm](https://github.com/kubernetes-sigs/descheduler/pull/821)
- [ ] Update version references in docs and Readme. [Example](https://github.com/kubernetes-sigs/descheduler/pull/617)
### Manual

## Release Process

When the above pre-release steps are complete and the release is ready to be cut, perform the following steps **in order**
(the flowchart below demonstrates these steps):

**Version release**
1. Create the `git tag` on `master` for the release, eg `v0.24.0`
2. Merge Helm chart version update to `master` (see [Helm chart](#helm-chart) below). [Example](https://github.com/kubernetes-sigs/descheduler/pull/709)
3. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter). [Example](https://github.com/kubernetes/k8s.io/pull/3344)
4. Cut release branch from `master`, eg `release-1.24`
5. Publish release using Github's release process from the git tag you created
6. Email `sig-scheduling@kubernetes.io` to announce the release

**Patch release**
1. Pick relevant code change commits to the matching release branch, eg `release-1.24`
2. Create the patch tag on the release branch, eg `v0.24.1` on `release-1.24`
3. Merge Helm chart version update to release branch
4. Perform the image promotion process for the patch version
5. Publish release using Github's release process from the git tag you created
6. Email `sig-scheduling@kubernetes.io` to announce the release

### Flowchart



### Image promotion process

Every merge to any branch triggers an [image build and push](https://github.com/kubernetes/test-infra/blob/c36b8e5/config/jobs/image-pushing/k8s-staging-descheduler.yaml) to a `gcr.io` repository.
These automated image builds are snapshots of the code in place at the time of every PR merge and
tagged with the latest git SHA at the time of the build. To create a final release image, the desired
auto-built image SHA is added to a [file upstream](https://github.com/kubernetes/k8s.io/blob/e9e971c/k8s.gcr.io/images/k8s-staging-descheduler/images.yaml) which
copies that image to a public registry.

Automatic builds can be monitored and re-triggered with the [`post-descheduler-push-images` job](https://prow.k8s.io/?job=post-descheduler-push-images) on prow.k8s.io.

Note that images can also be manually built and pushed using `VERSION=$VERSION make push-all` by [users with access](https://github.com/kubernetes/k8s.io/blob/fbee8f67b70304241e613a672c625ad972998ad7/groups/sig-scheduling/groups.yaml#L33-L43).

## Helm Chart
We currently use the [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) to automatically
publish [Helm chart releases](https://github.com/kubernetes-sigs/descheduler/blob/022e07c/.github/workflows/release.yaml).
This action is triggered when it detects any changes to [`Chart.yaml`](https://github.com/kubernetes-sigs/descheduler/blob/022e07c27853fade6d1304adc0a6ebe02642386c/charts/descheduler/Chart.yaml) on
a `release-*` branch.

Helm chart releases are managed by a separate set of git tags that are prefixed with `descheduler-helm-chart-*`. An example git tag name is `descheduler-helm-chart-0.18.0`.
Released versions of the helm charts are stored in the `gh-pages` branch of this repo.

The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version helm-descheduler-chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.

1. Merge all helm chart changes into the master branch before the release is tagged/cut
   1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix)
   2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Follow the release-branch or patch release tagging pattern from the above section.
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch

## Notes
The Helm releaser-action compares the changes in the action-triggering branch to the latest tag on that branch, so if you tag before creating the new branch there
will be nothing to compare and it will fail. This is why it's necessary to tag, eg, `v0.24.0` *before* making the changes to the
Helm chart version, so that there is a new diff for the action to find. (Tagging *after* making the Helm chart changes would
also work, but then the code that gets built into the promoted image will be tagged as `descheduler-helm-chart-xxx` rather than `v0.xx.0`).
1. Make sure your repo is clean by git's standards
2. Create a release branch `git checkout -b release-1.18` (not required for patch releases)
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
6. Build and push the container image to the staging registry `VERSION=$VERSION make push`
7. Publish a draft release using the tag you just created
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
9. Publish release
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release

### Notes
See [post-descheduler-push-images dashboard](https://testgrid.k8s.io/sig-scheduling#post-descheduler-push-images) for staging registry image build job status.

View the descheduler staging registry using [this URL](https://console.cloud.google.com/gcr/images/k8s-staging-descheduler/GLOBAL/descheduler) in a web browser
or use the below `gcloud` commands.

List images in staging registry.
```
gcloud container images list --repository gcr.io/k8s-staging-descheduler
@@ -102,3 +48,19 @@ Pull image from the staging registry.
```
docker pull gcr.io/k8s-staging-descheduler/descheduler:v20200206-0.9.0-94-ge2a23f284
```

## Helm Chart
Helm chart releases are managed by a separate set of git tags that are prefixed with `chart-*`. An example git tag name is `chart-0.18.0`. Released versions of the
helm charts are stored in the `gh-pages` branch of this repo. The [chart-releaser-action GitHub Action](https://github.com/helm/chart-releaser-action) is set up to
build and push the helm charts to the `gh-pages` branch when a `chart-*` git tag is created.

The major and minor version of the chart matches the descheduler major and minor versions. For example, descheduler helm chart version chart-0.18.0 corresponds
to descheduler version v0.18.0. The patch version of the descheduler helm chart and the patch version of the descheduler will not necessarily match. The patch
version of the descheduler helm chart is used to version changes specific to the helm chart.

1. Merge all helm chart changes into the appropriate release branch (i.e. release-1.18)
   1. Ensure that `appVersion` in file `charts/descheduler/Chart.yaml` matches the descheduler version (no `v` prefix); an illustrative `Chart.yaml` sketch follows these steps
   2. Ensure that `version` in file `charts/descheduler/Chart.yaml` has been incremented. This is the chart version.
2. Make sure your repo is clean by git's standards
3. Create the tag and push it `git checkout release-1.18; CHART_VERSION=chart-0.18.0; git tag $CHART_VERSION; git push origin $CHART_VERSION`
4. Verify the new helm artifact has been successfully pushed to the `gh-pages` branch
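For illustration, the `version`/`appVersion` relationship described in the steps above would look roughly like this in `charts/descheduler/Chart.yaml`; the concrete numbers are placeholders, and any field other than `version` and `appVersion` is an assumption about the chart metadata.

```yaml
apiVersion: v2            # assumed Helm chart apiVersion
name: descheduler
version: 0.18.1           # chart version, bumped for chart-only changes
appVersion: 0.18.0        # matching descheduler release, without the "v" prefix
```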
Binary file not shown.
Before: 121 KiB image (the release-process flowchart referenced above).
@@ -1,31 +1,52 @@
|
||||
# User Guide
|
||||
|
||||
Starting with descheduler release v0.10.0 container images are available in the official k8s container registry.
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |-------------------------------------------------|-------------------------|
|
||||
v0.34.0 | registry.k8s.io/descheduler/descheduler:v0.34.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.33.0 | registry.k8s.io/descheduler/descheduler:v0.33.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.32.0 | registry.k8s.io/descheduler/descheduler:v0.32.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.31.0 | registry.k8s.io/descheduler/descheduler:v0.31.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.1 | registry.k8s.io/descheduler/descheduler:v0.30.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.30.0 | registry.k8s.io/descheduler/descheduler:v0.30.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.29.0 | registry.k8s.io/descheduler/descheduler:v0.29.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
|
||||
Note that multi-arch container images cannot be pulled by [kind](https://kind.sigs.k8s.io) from a registry. Therefore
|
||||
starting with descheduler release v0.20.0 use the below process to download the official descheduler
|
||||
image into a kind cluster.
|
||||
```
|
||||
kind create cluster
|
||||
docker pull registry.k8s.io/descheduler/descheduler:v0.20.0
|
||||
kind load docker-image registry.k8s.io/descheduler/descheduler:v0.20.0
|
||||
```
|
||||
Starting with descheduler release v0.10.0 container images are available in these container registries.
|
||||
* `asia.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
|
||||
* `eu.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
|
||||
* `us.gcr.io/k8s-artifacts-prod/descheduler/descheduler`
|
||||
|
||||
## Policy Configuration Examples
|
||||
The [examples](https://github.com/kubernetes-sigs/descheduler/tree/master/examples) directory has descheduler policy configuration examples.
|
||||
|
||||
## CLI Options
|
||||
The descheduler has many CLI options that can be used to override its default behavior. Please check the [CLI Options](./cli/descheduler.md) documentation for details
|
||||
The descheduler has many CLI options that can be used to override its default behavior.
|
||||
```
descheduler --help
The descheduler evicts pods which may be bound to less desired nodes

Usage:
  descheduler [flags]
  descheduler [command]

Available Commands:
  help        Help about any command
  version     Version of descheduler

Flags:
      --add-dir-header                     If true, adds the file directory to the header
      --alsologtostderr                    log to standard error as well as files
      --descheduling-interval duration     Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
      --dry-run                            execute descheduler in dry run mode.
      --evict-local-storage-pods           Enables evicting pods using local storage by descheduler
  -h, --help                               help for descheduler
      --kubeconfig string                  File with kube configuration.
      --log-backtrace-at traceLocation     when logging hits line file:N, emit a stack trace (default :0)
      --log-dir string                     If non-empty, write log files in this directory
      --log-file string                    If non-empty, use this log file
      --log-file-max-size uint             Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
      --log-flush-frequency duration       Maximum number of seconds between log flushes (default 5s)
      --logtostderr                        log to standard error instead of files (default true)
      --max-pods-to-evict-per-node int     Limits the maximum number of pods to be evicted per node by descheduler
      --node-selector string               Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
      --policy-config-file string          File with descheduler policy configuration.
      --skip-headers                       If true, avoid header prefixes in the log messages
      --skip-log-headers                   If true, avoid headers when opening log files
      --stderrthreshold severity           logs at or above this threshold go to stderr (default 2)
  -v, --v Level                            number for the log level verbosity
      --vmodule moduleSpec                 comma-separated list of pattern=N settings for file-filtered logging

Use "descheduler [command] --help" for more information about a command.
```

## Production Use Cases

This section contains descriptions of real world production use cases.

@@ -47,82 +68,23 @@ descheduler -v=3 --evict-local-storage-pods --policy-config-file=pod-life-time.y

This policy configuration file ensures that pods created more than 7 days ago are evicted.
```
---
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "PodLifeTime"
      args:
        maxPodLifeTimeSeconds: 604800
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
strategies:
  "LowNodeUtilization":
    enabled: false
  "RemoveDuplicates":
    enabled: false
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: false
  "RemovePodsViolatingNodeAffinity":
    enabled: false
  "RemovePodsViolatingNodeTaints":
    enabled: false
  "RemovePodsHavingTooManyRestarts":
    enabled: false
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
```

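The `PodLifeTime` plugin also accepts an optional namespace filter. As a minimal sketch assuming the v1alpha2 API shown above (the `dev` namespace is purely illustrative), the same policy can be scoped so that only pods in selected namespaces are considered for eviction:

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "PodLifeTime"
      args:
        maxPodLifeTimeSeconds: 604800
        namespaces:
          include:
            - "dev" # hypothetical namespace, used here only for illustration
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
```
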
### Balance Cluster By Node Memory Utilization

If your cluster has been running for a long period of time, you may find that the resource utilization is not very balanced. The following two strategies can be used to rebalance your cluster based on `cpu`, `memory` or `number of pods`.

#### Balance high utilization nodes

Using `LowNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods from nodes with memory utilization over 70% to nodes with memory utilization below 20%.

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "LowNodeUtilization"
      args:
        thresholds:
          "memory": 20
        targetThresholds:
          "memory": 70
    plugins:
      balance:
        enabled:
          - "LowNodeUtilization"
```

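One optional refinement of the policy above, sketched here on the assumption that the `DefaultEvictor` plugin's `nodeFit` argument is available in your descheduler release: with `nodeFit: true`, a pod is only evicted when another node appears able to host it, which avoids evictions the scheduler cannot place anywhere better.

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "DefaultEvictor"
      args:
        nodeFit: true # only evict pods that appear to fit on another node
    - name: "LowNodeUtilization"
      args:
        thresholds:
          "memory": 20
        targetThresholds:
          "memory": 70
    plugins:
      balance:
        enabled:
          - "LowNodeUtilization"
```
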
#### Balance low utilization nodes

Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods from nodes with memory utilization lower than 20%. This should be used together with the kube-scheduler's `NodeResourcesFit` plugin and its `MostAllocated` scoring strategy (see the [scheduler configuration docs](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins)). The evicted pods will be compacted into a minimal set of nodes.

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "HighNodeUtilization"
      args:
        thresholds:
          "memory": 20
    plugins:
      balance:
        enabled:
          - "HighNodeUtilization"
```

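For reference, a minimal sketch of the matching kube-scheduler configuration, assuming the `kubescheduler.config.k8s.io/v1` API and the default profile name; it scores nodes with the `MostAllocated` strategy so that newly scheduled pods are packed onto fewer nodes (adjust resources and weights for your cluster):

```
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
profiles:
  - schedulerName: default-scheduler
    pluginConfig:
      - name: NodeResourcesFit
        args:
          scoringStrategy:
            type: MostAllocated # prefer nodes with higher requested/allocatable ratio
            resources:
              - name: cpu
                weight: 1
              - name: memory
                weight: 1
```
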
### Autoheal Node Problems

Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with [Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server. The node controller's TaintNodeByCondition feature then turns some of those conditions into taints; currently this only works for the default node conditions (PIDPressure, MemoryPressure, DiskPressure, Ready) and some cloud provider specific conditions. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.

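A minimal policy sketch for the descheduler side of this loop, assuming the v1alpha2 API used in the examples above; `RemovePodsViolatingNodeTaints` needs no arguments for the default `NoSchedule` taint handling:

```
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemovePodsViolatingNodeTaints"
    plugins:
      deschedule:
        enabled:
          - "RemovePodsViolatingNodeTaints"
```
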
---
**NOTE**

Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.

---

@@ -1,18 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemoveFailedPods"
      args:
        reasons:
          - "OutOfcpu"
          - "CreateContainerConfigError"
        includingInitContainers: true
        excludeOwnerKinds:
          - "Job"
        minPodLifetimeSeconds: 3600
    plugins:
      deschedule:
        enabled:
          - "RemoveFailedPods"

@@ -1,13 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "HighNodeUtilization"
      args:
        thresholds:
          "memory": 20
    plugins:
      balance:
        enabled:
          - "HighNodeUtilization"

@@ -1,15 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "LowNodeUtilization"
      args:
        thresholds:
          "memory": 20
        targetThresholds:
          "memory": 70
    plugins:
      balance:
        enabled:
          - "LowNodeUtilization"

@@ -1,13 +1,8 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemovePodsViolatingNodeAffinity"
      args:
        nodeAffinityType:
          - "requiredDuringSchedulingIgnoredDuringExecution"
    plugins:
      deschedule:
        enabled:
          - "RemovePodsViolatingNodeAffinity"
strategies:
  "RemovePodsViolatingNodeAffinity":
    enabled: true
    params:
      nodeAffinityType:
        - "requiredDuringSchedulingIgnoredDuringExecution"

@@ -1,15 +1,20 @@
apiVersion: "descheduler/v1alpha2"
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "PodLifeTime"
      args:
        maxPodLifeTimeSeconds: 604800 # 7 days
        states:
          - "Pending"
          - "PodInitializing"
    plugins:
      deschedule:
        enabled:
          - "PodLifeTime"
strategies:
  "LowNodeUtilization":
    enabled: false
  "RemoveDuplicates":
    enabled: false
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: false
  "RemovePodsViolatingNodeAffinity":
    enabled: false
  "RemovePodsViolatingNodeTaints":
    enabled: false
  "RemovePodsHavingTooManyRestarts":
    enabled: false
  "PodLifeTime":
    enabled: true
    params:
      maxPodLifeTimeSeconds: 604800 # 7 days

@@ -1,36 +1,25 @@
apiVersion: "descheduler/v1alpha2"
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemoveDuplicates"
    - name: "RemovePodsViolatingInterPodAntiAffinity"
    - name: "LowNodeUtilization"
      args:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
    - name: "RemovePodsHavingTooManyRestarts"
      args:
        podRestartThreshold: 100
        includingInitContainers: true
    - name: "RemovePodsViolatingTopologySpreadConstraint"
      args:
        constraints:
          - DoNotSchedule
          - ScheduleAnyway
    plugins:
      deschedule:
        enabled:
          - "RemovePodsViolatingInterPodAntiAffinity"
          - "RemovePodsHavingTooManyRestarts"
      balance:
        enabled:
          - "RemoveDuplicates"
          - "LowNodeUtilization"
          - "RemovePodsViolatingTopologySpreadConstraint"
strategies:
  "RemoveDuplicates":
    enabled: true
  "RemovePodsViolatingInterPodAntiAffinity":
    enabled: true
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        thresholds:
          "cpu" : 20
          "memory": 20
          "pods": 20
        targetThresholds:
          "cpu" : 50
          "memory": 50
          "pods": 50
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        podRestartThresholds: 100
        includingInitContainers: true

@@ -1,15 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemovePodsHavingTooManyRestarts"
      args:
        podRestartThreshold: 100
        includingInitContainers: true
        states:
          - CrashLoopBackOff
    plugins:
      deschedule:
        enabled:
          - "RemovePodsHavingTooManyRestarts"

@@ -1,14 +0,0 @@
apiVersion: "descheduler/v1alpha2"
kind: "DeschedulerPolicy"
profiles:
  - name: ProfileName
    pluginConfig:
    - name: "RemovePodsViolatingTopologySpreadConstraint"
      args:
        constraints:
          - DoNotSchedule
          - ScheduleAnyway
    plugins:
      balance:
        enabled:
          - "RemovePodsViolatingTopologySpreadConstraint"

142 go.mod
@@ -1,138 +1,14 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.3
|
||||
|
||||
godebug default=go1.24
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/common v0.64.0
|
||||
github.com/spf13/cobra v1.10.0
|
||||
github.com/spf13/pflag v1.0.9
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
google.golang.org/grpc v1.72.2
|
||||
k8s.io/api v0.34.0
|
||||
k8s.io/apimachinery v0.34.0
|
||||
k8s.io/apiserver v0.34.0
|
||||
k8s.io/client-go v0.34.0
|
||||
k8s.io/code-generator v0.34.0
|
||||
k8s.io/component-base v0.34.0
|
||||
k8s.io/component-helpers v0.34.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/metrics v0.34.0
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
kubevirt.io/api v1.3.0
|
||||
kubevirt.io/client-go v1.3.0
|
||||
kubevirt.io/containerized-data-importer-api v1.60.1 // indirect; drops dependency on o/api
|
||||
sigs.k8s.io/mdtoc v1.1.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/pflag v1.0.5
|
||||
k8s.io/api v0.18.2
|
||||
k8s.io/apimachinery v0.18.2
|
||||
k8s.io/apiserver v0.18.2
|
||||
k8s.io/client-go v0.18.2
|
||||
k8s.io/component-base v0.18.2
|
||||
k8s.io/klog v1.0.0
|
||||
)
|
||||
|
||||
require golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.24.0 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1 // indirect
|
||||
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-kit/kit v0.13.0 // indirect
|
||||
github.com/go-kit/log v0.2.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-logr/zapr v1.3.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.2.4 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20210514010506-3b9f47219fe7 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/cel-go v0.26.0 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/openshift/custom-resource-status v1.1.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.6.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/mod v0.21.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/tools v0.26.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.30.0 // indirect
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
|
||||
k8s.io/kms v0.34.0 // indirect
|
||||
k8s.io/kube-openapi v0.30.0 // indirect
|
||||
kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
||||
|
||||
replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
BUILD
|
||||
CHANGELOG
|
||||
OWNERS
|
||||
go.mod
|
||||
go.sum
|
||||
vendor/
|
||||
@@ -1,257 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Usage Instructions: https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md
|
||||
|
||||
# Checkout a PR from GitHub. (Yes, this is sitting in a Git tree. How
|
||||
# meta.) Assumes you care about pulls from remote "upstream" and
|
||||
# checks them out to a branch named:
|
||||
# automated-cherry-pick-of-<pr>-<target branch>-<timestamp>
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
REPO_ROOT="$(git rev-parse --show-toplevel)"
|
||||
declare -r REPO_ROOT
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
STARTINGBRANCH=$(git symbolic-ref --short HEAD)
|
||||
declare -r STARTINGBRANCH
|
||||
declare -r REBASEMAGIC="${REPO_ROOT}/.git/rebase-apply"
|
||||
DRY_RUN=${DRY_RUN:-""}
|
||||
REGENERATE_DOCS=${REGENERATE_DOCS:-""}
|
||||
UPSTREAM_REMOTE=${UPSTREAM_REMOTE:-upstream}
|
||||
FORK_REMOTE=${FORK_REMOTE:-origin}
|
||||
MAIN_REPO_ORG=${MAIN_REPO_ORG:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $3}')}
|
||||
MAIN_REPO_NAME=${MAIN_REPO_NAME:-$(git remote get-url "$UPSTREAM_REMOTE" | awk '{gsub(/http[s]:\/\/|git@/,"")}1' | awk -F'[@:./]' 'NR==1{print $4}')}
|
||||
|
||||
if [[ -z ${GITHUB_USER:-} ]]; then
|
||||
echo "Please export GITHUB_USER=<your-user> (or GH organization, if that's where your fork lives)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v gh > /dev/null; then
|
||||
echo "Can't find 'gh' tool in PATH, please install from https://github.com/cli/cli"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$#" -lt 2 ]]; then
|
||||
echo "${0} <remote branch> <pr-number>...: cherry pick one or more <pr> onto <remote branch> and leave instructions for proposing pull request"
|
||||
echo
|
||||
echo " Checks out <remote branch> and handles the cherry-pick of <pr> (possibly multiple) for you."
|
||||
echo " Examples:"
|
||||
echo " $0 upstream/release-3.14 12345 # Cherry-picks PR 12345 onto upstream/release-3.14 and proposes that as a PR."
|
||||
echo " $0 upstream/release-3.14 12345 56789 # Cherry-picks PR 12345, then 56789 and proposes the combination as a single PR."
|
||||
echo
|
||||
echo " Set the DRY_RUN environment var to skip git push and creating PR."
|
||||
echo " This is useful for creating patches to a release branch without making a PR."
|
||||
echo " When DRY_RUN is set the script will leave you in a branch containing the commits you cherry-picked."
|
||||
echo
|
||||
echo " Set the REGENERATE_DOCS environment var to regenerate documentation for the target branch after picking the specified commits."
|
||||
echo " This is useful when picking commits containing changes to API documentation."
|
||||
echo
|
||||
echo " Set UPSTREAM_REMOTE (default: upstream) and FORK_REMOTE (default: origin)"
|
||||
echo " to override the default remote names to what you have locally."
|
||||
echo
|
||||
echo " For merge process info, see https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Checks if you are logged in. Will error/bail if you are not.
|
||||
gh auth status
|
||||
|
||||
if git_status=$(git status --porcelain --untracked=no 2>/dev/null) && [[ -n "${git_status}" ]]; then
|
||||
echo "!!! Dirty tree. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -e "${REBASEMAGIC}" ]]; then
|
||||
echo "!!! 'git rebase' or 'git am' in progress. Clean up and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -r BRANCH="$1"
|
||||
shift 1
|
||||
declare -r PULLS=( "$@" )
|
||||
|
||||
function join { local IFS="$1"; shift; echo "$*"; }
|
||||
PULLDASH=$(join - "${PULLS[@]/#/#}") # Generates something like "#12345-#56789"
|
||||
declare -r PULLDASH
|
||||
PULLSUBJ=$(join " " "${PULLS[@]/#/#}") # Generates something like "#12345 #56789"
|
||||
declare -r PULLSUBJ
|
||||
|
||||
echo "+++ Updating remotes..."
|
||||
git remote update "${UPSTREAM_REMOTE}" "${FORK_REMOTE}"
|
||||
|
||||
if ! git log -n1 --format=%H "${BRANCH}" >/dev/null 2>&1; then
|
||||
echo "!!! '${BRANCH}' not found. The second argument should be something like ${UPSTREAM_REMOTE}/release-0.21."
|
||||
echo " (In particular, it needs to be a valid, existing remote branch that I can 'git checkout'.)"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NEWBRANCHREQ="automated-cherry-pick-of-${PULLDASH}" # "Required" portion for tools.
|
||||
declare -r NEWBRANCHREQ
|
||||
NEWBRANCH="$(echo "${NEWBRANCHREQ}-${BRANCH}" | sed 's/\//-/g')"
|
||||
declare -r NEWBRANCH
|
||||
NEWBRANCHUNIQ="${NEWBRANCH}-$(date +%s)"
|
||||
declare -r NEWBRANCHUNIQ
|
||||
echo "+++ Creating local branch ${NEWBRANCHUNIQ}"
|
||||
|
||||
cleanbranch=""
|
||||
gitamcleanup=false
|
||||
function return_to_kansas {
|
||||
if [[ "${gitamcleanup}" == "true" ]]; then
|
||||
echo
|
||||
echo "+++ Aborting in-progress git am."
|
||||
git am --abort >/dev/null 2>&1 || true
|
||||
fi
|
||||
|
||||
# return to the starting branch and delete the PR text file
|
||||
if [[ -z "${DRY_RUN}" ]]; then
|
||||
echo
|
||||
echo "+++ Returning you to the ${STARTINGBRANCH} branch and cleaning up."
|
||||
git checkout -f "${STARTINGBRANCH}" >/dev/null 2>&1 || true
|
||||
if [[ -n "${cleanbranch}" ]]; then
|
||||
git branch -D "${cleanbranch}" >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
trap return_to_kansas EXIT
|
||||
|
||||
SUBJECTS=()
|
||||
function make-a-pr() {
|
||||
local rel
|
||||
rel="$(basename "${BRANCH}")"
|
||||
echo
|
||||
echo "+++ Creating a pull request on GitHub at ${GITHUB_USER}:${NEWBRANCH}"
|
||||
|
||||
local numandtitle
|
||||
numandtitle=$(printf '%s\n' "${SUBJECTS[@]}")
|
||||
prtext=$(cat <<EOF
|
||||
Cherry pick of ${PULLSUBJ} on ${rel}.
|
||||
|
||||
${numandtitle}
|
||||
|
||||
For details on the cherry pick process, see the [cherry pick requests](https://git.k8s.io/community/contributors/devel/sig-release/cherry-picks.md) page.
|
||||
|
||||
\`\`\`release-note
|
||||
|
||||
\`\`\`
|
||||
EOF
|
||||
)
|
||||
|
||||
gh pr create --title="Automated cherry pick of ${numandtitle}" --body="${prtext}" --head "${GITHUB_USER}:${NEWBRANCH}" --base "${rel}" --repo="${MAIN_REPO_ORG}/${MAIN_REPO_NAME}"
|
||||
}
|
||||
|
||||
git checkout -b "${NEWBRANCHUNIQ}" "${BRANCH}"
|
||||
cleanbranch="${NEWBRANCHUNIQ}"
|
||||
|
||||
gitamcleanup=true
|
||||
for pull in "${PULLS[@]}"; do
|
||||
echo "+++ Downloading patch to /tmp/${pull}.patch (in case you need to do this again)"
|
||||
|
||||
curl -o "/tmp/${pull}.patch" -sSL "https://github.com/${MAIN_REPO_ORG}/${MAIN_REPO_NAME}/pull/${pull}.patch"
|
||||
echo
|
||||
echo "+++ About to attempt cherry pick of PR. To reattempt:"
|
||||
echo " $ git am -3 /tmp/${pull}.patch"
|
||||
echo
|
||||
git am -3 "/tmp/${pull}.patch" || {
|
||||
conflicts=false
|
||||
while unmerged=$(git status --porcelain | grep ^U) && [[ -n ${unmerged} ]] \
|
||||
|| [[ -e "${REBASEMAGIC}" ]]; do
|
||||
conflicts=true # <-- We should have detected conflicts once
|
||||
echo
|
||||
echo "+++ Conflicts detected:"
|
||||
echo
|
||||
(git status --porcelain | grep ^U) || echo "!!! None. Did you git am --continue?"
|
||||
echo
|
||||
echo "+++ Please resolve the conflicts in another window (and remember to 'git add / git am --continue')"
|
||||
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
echo
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "${conflicts}" != "true" ]]; then
|
||||
echo "!!! git am failed, likely because of an in-progress 'git am' or 'git rebase'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# set the subject
|
||||
subject=$(grep -m 1 "^Subject" "/tmp/${pull}.patch" | sed -e 's/Subject: \[PATCH//g' | sed 's/.*] //')
|
||||
SUBJECTS+=("#${pull}: ${subject}")
|
||||
|
||||
# remove the patch file from /tmp
|
||||
rm -f "/tmp/${pull}.patch"
|
||||
done
|
||||
gitamcleanup=false
|
||||
|
||||
# Re-generate docs (if needed)
|
||||
if [[ -n "${REGENERATE_DOCS}" ]]; then
|
||||
echo
|
||||
echo "Regenerating docs..."
|
||||
if ! hack/generate-docs.sh; then
|
||||
echo
|
||||
echo "hack/generate-docs.sh FAILED to complete."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${DRY_RUN}" ]]; then
|
||||
echo "!!! Skipping git push and PR creation because you set DRY_RUN."
|
||||
echo "To return to the branch you were in when you invoked this script:"
|
||||
echo
|
||||
echo " git checkout ${STARTINGBRANCH}"
|
||||
echo
|
||||
echo "To delete this branch:"
|
||||
echo
|
||||
echo " git branch -D ${NEWBRANCHUNIQ}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if git remote -v | grep ^"${FORK_REMOTE}" | grep "${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"; then
|
||||
echo "!!! You have ${FORK_REMOTE} configured as your ${MAIN_REPO_ORG}/${MAIN_REPO_NAME}.git"
|
||||
echo "This isn't normal. Leaving you with push instructions:"
|
||||
echo
|
||||
echo "+++ First manually push the branch this script created:"
|
||||
echo
|
||||
echo " git push REMOTE ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
echo "where REMOTE is your personal fork (maybe ${UPSTREAM_REMOTE}? Consider swapping those.)."
|
||||
echo "OR consider setting UPSTREAM_REMOTE and FORK_REMOTE to different values."
|
||||
echo
|
||||
make-a-pr
|
||||
cleanbranch=""
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo
|
||||
echo "+++ I'm about to do the following to push to GitHub (and I'm assuming ${FORK_REMOTE} is your personal fork):"
|
||||
echo
|
||||
echo " git push ${FORK_REMOTE} ${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
echo
|
||||
read -p "+++ Proceed (anything other than 'y' aborts the cherry-pick)? [y/n] " -r
|
||||
if ! [[ "${REPLY}" =~ ^[yY]$ ]]; then
|
||||
echo "Aborting." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
git push "${FORK_REMOTE}" -f "${NEWBRANCHUNIQ}:${NEWBRANCH}"
|
||||
make-a-pr
|
||||
@@ -1,20 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra/doc"
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app"
|
||||
)
|
||||
|
||||
var docGenPath = "docs/cli"
|
||||
|
||||
func main() {
|
||||
cmd := app.NewDeschedulerCommand(os.Stdout)
|
||||
cmd.AddCommand(app.NewVersionCommand())
|
||||
cmd.DisableAutoGenTag = true // Disable this so that the diff wont track it
|
||||
if err := doc.GenMarkdownTree(cmd, docGenPath); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
@@ -18,15 +18,15 @@ E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
|
||||
|
||||
create_cluster() {
|
||||
echo "#################### Creating instances ##########################"
|
||||
gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
# Keeping the --zone here so as to make sure that e2e's can run locally.
|
||||
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
|
||||
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
|
||||
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
|
||||
# Delete the firewall port created for master.
|
||||
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
|
||||
@@ -44,10 +44,10 @@ generate_kubeadm_instance_files() {
|
||||
|
||||
|
||||
transfer_install_files() {
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||
}
|
||||
|
||||
|
||||
@@ -55,19 +55,19 @@ install_kube() {
|
||||
# Docker installation.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
|
||||
# kubeadm installation.
|
||||
# 1. Transfer files to master, nodes.
|
||||
transfer_install_files
|
||||
# 2. Install kubeadm.
|
||||
#TODO: Add rm /tmp/kubeadm_install.sh
|
||||
# Open port for kube API server
|
||||
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
|
||||
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
|
||||
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
|
||||
|
||||
# Copy the kubeconfig file onto /tmp for e2e tests.
|
||||
# Copy the kubeconfig file onto /tmp for e2e tests.
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
|
||||
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
|
||||
|
||||
@@ -75,15 +75,16 @@ install_kube() {
|
||||
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
|
||||
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
|
||||
|
||||
# Copy kubeadm_join to every node.
|
||||
# Copy kubeadm_join to every node.
|
||||
#TODO: Put these in a loop, so that extension becomes possible.
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
|
||||
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
|
||||
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,18 +1,6 @@
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
apiVersion: kind.sigs.k8s.io/v1alpha3
|
||||
nodes:
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: JoinConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "topology.kubernetes.io/zone=local-a"
|
||||
- role: worker
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: JoinConfiguration
|
||||
nodeRegistration:
|
||||
kubeletExtraArgs:
|
||||
node-labels: "topology.kubernetes.io/zone=local-b"
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Utility command based on 'find' command. The pipeline is as following:
|
||||
# 1. find all the go files; (exclude specific path: vendor etc)
|
||||
# 2. find all the files containing specific tags in contents;
|
||||
# 3. extract related dirs;
|
||||
# 4. remove duplicated paths;
|
||||
# 5. merge all dirs in array with delimiter ,;
|
||||
#
|
||||
# Example:
|
||||
# find_dirs_containing_comment_tags("+k8s:")
|
||||
# Return:
|
||||
# sigs.k8s.io/descheduler/a,sigs.k8s.io/descheduler/b,sigs.k8s.io/descheduler/c
|
||||
function find_dirs_containing_comment_tags() {
|
||||
array=()
|
||||
while IFS='' read -r line; do array+=("$line"); done < <( \
|
||||
find . -type f -name \*.go -not -path "./vendor/*" -not -path "./_tmp/*" -print0 \
|
||||
| xargs -0 grep --color=never -l "$@" \
|
||||
| xargs -n1 dirname \
|
||||
| LC_ALL=C sort -u \
|
||||
)
|
||||
|
||||
IFS=" ";
|
||||
printf '%s' "${array[*]}";
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2024 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# go::verify_version verifies the go version is supported by the project.
|
||||
# descheduler actively supports 3 versions, therefore 3 go versions are supported.
|
||||
go::verify_version() {
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.22|go1.23|go1.24') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
//go:build tools
|
||||
// +build tools
|
||||
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This package imports things required by build scripts, to force `go mod` to see them as dependencies
|
||||
package tools
|
||||
|
||||
import (
|
||||
_ "github.com/client9/misspell/cmd/misspell"
|
||||
_ "k8s.io/code-generator"
|
||||
_ "sigs.k8s.io/mdtoc"
|
||||
)
|
||||
@@ -1,23 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2023 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
|
||||
go run ${SCRIPT_DIR}/doc-gen
|
||||
@@ -1,10 +1,9 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/conversion-gen" "k8s.io/code-generator/cmd/conversion-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/conversion-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--output-file zz_generated.conversion.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:conversion-gen=")
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.conversion
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/deepcopy-gen" "k8s.io/code-generator/cmd/deepcopy-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/deepcopy-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--output-file zz_generated.deepcopy.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:deepcopy-gen=")
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig,${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.deepcopy
|
||||
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
#!/bin/bash
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/generator-help.sh"
|
||||
|
||||
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
|
||||
|
||||
${OS_OUTPUT_BINPATH}/defaulter-gen \
|
||||
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha2" \
|
||||
--output-file zz_generated.defaults.go \
|
||||
$(find_dirs_containing_comment_tags "+k8s:defaulter-gen=")
|
||||
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
|
||||
--output-file-base zz_generated.defaults
|
||||
|
||||
@@ -20,9 +20,13 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
|
||||
|
||||
go::verify_version
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
${CONTAINER_ENGINE:-docker} run -it --rm --network host --workdir=/data --volume ~/.kube/config:/root/.kube/config:ro --volume $(pwd):/data quay.io/helmpack/chart-testing:v3.7.0 /bin/bash -c "git config --global --add safe.directory /data; ct install --config=.github/ci/ct.yaml --helm-extra-set-args=\"--set=kind=Deployment\""
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
./hack/update-generated-conversions.sh
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
|
||||
exit 1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated conversions verified."
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
mkdir -p "${DESCHEDULER_ROOT}/_tmp"
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
./hack/update-generated-deep-copies.sh
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated deep-copies output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
|
||||
exit 1
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated deep-copies verified."
|
||||
@@ -1,29 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
|
||||
|
||||
_deschedulertmp="${_tmpdir}"
|
||||
mkdir -p "${_deschedulertmp}"
|
||||
|
||||
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
|
||||
_deschedulertmp="${_deschedulertmp}/descheduler"
|
||||
|
||||
pushd "${_deschedulertmp}" > /dev/null 2>&1
|
||||
./hack/update-generated-defaulters.sh
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
|
||||
echo "Generated defaulters output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
|
||||
fi
|
||||
popd > /dev/null 2>&1
|
||||
|
||||
echo "Generated Defaulters verified."
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2023 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
|
||||
temp_dir=$(mktemp -d)
|
||||
|
||||
go run -ldflags "-X main.docGenPath=${temp_dir}" ${SCRIPT_DIR}/doc-gen
|
||||
|
||||
if ! _out="$(diff -Naupr ${SCRIPT_DIR}/../docs/cli "${temp_dir}")"; then
|
||||
echo "Generated output differs:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Generated conversions verify failed. Please run ./hack/update-docs.sh"
|
||||
rm -rf ${temp_dir}
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf ${temp_dir}
|
||||
@@ -20,9 +20,13 @@ set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
|
||||
source "${DESCHEDULER_ROOT}/hack/lib/go.sh"
|
||||
|
||||
go::verify_version
|
||||
GO_VERSION=($(go version))
|
||||
|
||||
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.2|go1.3|go1.4|go1.5|go1.6|go1.7|go1.8|go1.9|go1.10|go1.11|go1.12|go1.13') ]]; then
|
||||
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${DESCHEDULER_ROOT}"
|
||||
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2021 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
|
||||
|
||||
go vet ${OS_ROOT}/...
|
||||
@@ -1,41 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
# Copyright 2018 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script checks commonly misspelled English words in all files in the
|
||||
# working directory by client9/misspell package.
|
||||
# Usage: `hack/verify-spelling.sh`.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
||||
export KUBE_ROOT
|
||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||
|
||||
# Ensure that we find the binaries we build before anything else.
|
||||
export GOBIN="${OS_OUTPUT_BINPATH}"
|
||||
PATH="${GOBIN}:${PATH}"
|
||||
|
||||
# Install tools we need
|
||||
pushd "${KUBE_ROOT}" >/dev/null
|
||||
GO111MODULE=on go install github.com/client9/misspell/cmd/misspell
|
||||
popd >/dev/null
|
||||
|
||||
# Spell checking
|
||||
# All the skipping files are defined in hack/.spelling_failures
|
||||
skipping_file="${KUBE_ROOT}/hack/.spelling_failures"
|
||||
failing_packages=$(sed "s| | -e |g" "${skipping_file}")
|
||||
git ls-files | grep -v -e "${failing_packages}" | xargs misspell -i "Creater,creater,ect" -error -o stderr
|
||||
@@ -70,7 +70,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
|
||||
ret=1
|
||||
fi
|
||||
|
||||
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" -x "README*" vendor "${_deschedulertmp}/vendor")"; then
|
||||
if ! _out="$(diff -Naupr -x "BUILD" -x "AUTHORS*" -x "CONTRIBUTORS*" vendor "${_deschedulertmp}/vendor")"; then
|
||||
echo "Your vendored results are different:" >&2
|
||||
echo "${_out}" >&2
|
||||
echo "Vendor Verify failed." >&2
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,16 +0,0 @@
|
||||
title: descheduler integration with evacuation API as an alternative to eviction API
|
||||
kep-number: 1397
|
||||
authors:
|
||||
- "@ingvagabund"
|
||||
owning-sig: sig-scheduling
|
||||
participating-sigs:
|
||||
- sig-apps
|
||||
status: provisional
|
||||
creation-date: 2024-04-14
|
||||
reviewers:
|
||||
- atiratree
|
||||
approvers:
|
||||
- TBD
|
||||
feature-gates:
|
||||
- TBD
|
||||
stage: alpha
|
||||
File diff suppressed because it is too large
Binary file not shown.
@@ -1,29 +0,0 @@
|
||||
title: Descheduling framework
|
||||
kep-number: 753
|
||||
authors:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
owning-sig: sig-scheduling
|
||||
status: provisional
|
||||
creation-date: 2024-04-08
|
||||
reviewers:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
- "@a7i"
|
||||
- "@knelasevero"
|
||||
approvers:
|
||||
- "@ingvagabund"
|
||||
- "@damemi"
|
||||
- "@a7i"
|
||||
- "@knelasevero"
|
||||
#replaces:
|
||||
|
||||
# The target maturity stage in the current dev cycle for this KEP.
|
||||
stage: alpha
|
||||
|
||||
# The most recent milestone for which work toward delivery of this KEP has been
|
||||
# done. This can be the current (upcoming) milestone, if it is being actively
|
||||
# worked on.
|
||||
# latest-milestone: "v1.19"
|
||||
|
||||
# disable-supported: true
|
||||
@@ -1,34 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: descheduler-policy-configmap
|
||||
namespace: kube-system
|
||||
data:
|
||||
policy.yaml: |
|
||||
apiVersion: "descheduler/v1alpha2"
|
||||
kind: "DeschedulerPolicy"
|
||||
profiles:
|
||||
- name: ProfileName
|
||||
pluginConfig:
|
||||
- name: "DefaultEvictor"
|
||||
- name: "RemovePodsViolatingInterPodAntiAffinity"
|
||||
- name: "RemoveDuplicates"
|
||||
- name: "LowNodeUtilization"
|
||||
args:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
plugins:
|
||||
balance:
|
||||
enabled:
|
||||
- "LowNodeUtilization"
|
||||
- "RemoveDuplicates"
|
||||
deschedule:
|
||||
enabled:
|
||||
- "RemovePodsViolatingInterPodAntiAffinity"
|
||||
@@ -1,6 +0,0 @@
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
|
||||
resources:
|
||||
- configmap.yaml
|
||||
- rbac.yaml
|
||||
@@ -1,81 +0,0 @@
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: descheduler-cluster-role
|
||||
rules:
|
||||
- apiGroups: ["events.k8s.io"]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/eviction"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
resourceNames: ["descheduler"]
|
||||
verbs: ["get", "patch", "delete"]
|
||||
- apiGroups: ["metrics.k8s.io"]
|
||||
resources: ["nodes", "pods"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: descheduler-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: descheduler-sa
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: descheduler-cluster-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: descheduler-cluster-role
|
||||
subjects:
|
||||
- name: descheduler-sa
|
||||
kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: descheduler-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: descheduler-role
|
||||
subjects:
|
||||
- name: descheduler-sa
|
||||
kind: ServiceAccount
|
||||
namespace: kube-system
|
||||
28 kubernetes/configmap.yaml Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: descheduler-policy-configmap
|
||||
namespace: kube-system
|
||||
data:
|
||||
policy.yaml: |
|
||||
apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemoveDuplicates":
|
||||
enabled: true
|
||||
"RemovePodsViolatingInterPodAntiAffinity":
|
||||
enabled: true
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
|
||||
35 kubernetes/cronjob.yaml Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
apiVersion: batch/v1beta1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: descheduler-cronjob
|
||||
namespace: kube-system
|
||||
spec:
|
||||
schedule: "*/2 * * * *"
|
||||
concurrencyPolicy: "Forbid"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: descheduler-pod
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: descheduler
|
||||
image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.18.0
|
||||
volumeMounts:
|
||||
- mountPath: /policy-dir
|
||||
name: policy-volume
|
||||
command:
|
||||
- "/bin/descheduler"
|
||||
args:
|
||||
- "--policy-config-file"
|
||||
- "/policy-dir/policy.yaml"
|
||||
- "--v"
|
||||
- "3"
|
||||
restartPolicy: "Never"
|
||||
serviceAccountName: descheduler-sa
|
||||
volumes:
|
||||
- name: policy-volume
|
||||
configMap:
|
||||
name: descheduler-policy-configmap
|
||||
@@ -1,55 +0,0 @@
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: descheduler-cronjob
  namespace: kube-system
spec:
  schedule: "*/2 * * * *"
  concurrencyPolicy: "Forbid"
  jobTemplate:
    spec:
      template:
        metadata:
          name: descheduler-pod
        spec:
          priorityClassName: system-cluster-critical
          containers:
          - name: descheduler
            image: registry.k8s.io/descheduler/descheduler:v0.34.0
            volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
            command:
            - "/bin/descheduler"
            args:
            - "--policy-config-file"
            - "/policy-dir/policy.yaml"
            - "--v"
            - "3"
            resources:
              requests:
                cpu: "500m"
                memory: "256Mi"
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /healthz
                port: 10258
                scheme: HTTPS
              initialDelaySeconds: 3
              periodSeconds: 10
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                drop:
                - ALL
              privileged: false
              readOnlyRootFilesystem: true
              runAsNonRoot: true
          restartPolicy: "Never"
          serviceAccountName: descheduler-sa
          volumes:
          - name: policy-volume
            configMap:
              name: descheduler-policy-configmap
@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- ../base
- cronjob.yaml
@@ -1,62 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: descheduler
  namespace: kube-system
  labels:
    app: descheduler
spec:
  replicas: 1
  selector:
    matchLabels:
      app: descheduler
  template:
    metadata:
      labels:
        app: descheduler
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: descheduler-sa
      containers:
      - name: descheduler
        image: registry.k8s.io/descheduler/descheduler:v0.34.0
        imagePullPolicy: IfNotPresent
        command:
        - "/bin/descheduler"
        args:
        - "--policy-config-file"
        - "/policy-dir/policy.yaml"
        - "--descheduling-interval"
        - "5m"
        - "--v"
        - "3"
        ports:
        - containerPort: 10258
          protocol: TCP
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10258
            scheme: HTTPS
          initialDelaySeconds: 3
          periodSeconds: 10
        resources:
          requests:
            cpu: 500m
            memory: 256Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          privileged: false
          readOnlyRootFilesystem: true
          runAsNonRoot: true
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap
@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- ../base
- deployment.yaml
kubernetes/job.yaml (Normal file, 33 lines)
@@ -0,0 +1,33 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job
  namespace: kube-system
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: descheduler-pod
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: descheduler
        image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.18.0
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
        command:
        - "/bin/descheduler"
        args:
        - "--policy-config-file"
        - "/policy-dir/policy.yaml"
        - "--v"
        - "3"
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap
@@ -1,53 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: descheduler-job
  namespace: kube-system
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      name: descheduler-pod
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: descheduler
        image: registry.k8s.io/descheduler/descheduler:v0.34.0
        volumeMounts:
        - mountPath: /policy-dir
          name: policy-volume
        command:
        - "/bin/descheduler"
        args:
        - "--policy-config-file"
        - "/policy-dir/policy.yaml"
        - "--v"
        - "3"
        resources:
          requests:
            cpu: "500m"
            memory: "256Mi"
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /healthz
            port: 10258
            scheme: HTTPS
          initialDelaySeconds: 3
          periodSeconds: 10
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          privileged: false
          readOnlyRootFilesystem: true
          runAsNonRoot: true
      restartPolicy: "Never"
      serviceAccountName: descheduler-sa
      volumes:
      - name: policy-volume
        configMap:
          name: descheduler-policy-configmap
@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- ../base
- job.yaml
kubernetes/rbac.yaml (Normal file, 40 lines)
@@ -0,0 +1,40 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: descheduler-cluster-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "watch", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
  resources: ["pods/eviction"]
  verbs: ["create"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: descheduler-sa
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: descheduler-cluster-role-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: descheduler-cluster-role
subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system
@@ -1,121 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "sync"

    "k8s.io/component-base/metrics"
    "k8s.io/component-base/metrics/legacyregistry"

    "sigs.k8s.io/descheduler/pkg/version"
)

const (
    // DeschedulerSubsystem - subsystem name used by descheduler
    DeschedulerSubsystem = "descheduler"
)

var (
    PodsEvicted = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem:         DeschedulerSubsystem,
            Name:              "pods_evicted",
            Help:              "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
            StabilityLevel:    metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
        }, []string{"result", "strategy", "profile", "namespace", "node"})
    PodsEvictedTotal = metrics.NewCounterVec(
        &metrics.CounterOpts{
            Subsystem:      DeschedulerSubsystem,
            Name:           "pods_evicted_total",
            Help:           "Number of total evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
            StabilityLevel: metrics.ALPHA,
        }, []string{"result", "strategy", "profile", "namespace", "node"})

    buildInfo = metrics.NewGauge(
        &metrics.GaugeOpts{
            Subsystem:      DeschedulerSubsystem,
            Name:           "build_info",
            Help:           "Build info about descheduler, including Go version, Descheduler version, Git SHA, Git branch",
            ConstLabels:    map[string]string{"GoVersion": version.Get().GoVersion, "AppVersion": version.Get().Major + "." + version.Get().Minor, "DeschedulerVersion": version.Get().GitVersion, "GitBranch": version.Get().GitBranch, "GitSha1": version.Get().GitSha1},
            StabilityLevel: metrics.ALPHA,
        },
    )

    DeschedulerLoopDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem:         DeschedulerSubsystem,
            Name:              "descheduler_loop_duration_seconds",
            Help:              "Time taken to complete a full descheduling cycle",
            StabilityLevel:    metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
            Buckets:           []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
        }, []string{})
    LoopDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem:      DeschedulerSubsystem,
            Name:           "loop_duration_seconds",
            Help:           "Time taken to complete a full descheduling cycle",
            StabilityLevel: metrics.ALPHA,
            Buckets:        []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500},
        }, []string{})

    DeschedulerStrategyDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem:         DeschedulerSubsystem,
            Name:              "descheduler_strategy_duration_seconds",
            Help:              "Time taken to complete Each strategy of the descheduling operation",
            StabilityLevel:    metrics.ALPHA,
            DeprecatedVersion: "0.34.0",
            Buckets:           []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
        }, []string{"strategy", "profile"})
    StrategyDuration = metrics.NewHistogramVec(
        &metrics.HistogramOpts{
            Subsystem:      DeschedulerSubsystem,
            Name:           "strategy_duration_seconds",
            Help:           "Time taken to complete Each strategy of the descheduling operation",
            StabilityLevel: metrics.ALPHA,
            Buckets:        []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 50, 100},
        }, []string{"strategy", "profile"})

    metricsList = []metrics.Registerable{
        PodsEvicted,
        PodsEvictedTotal,
        buildInfo,
        DeschedulerLoopDuration,
        DeschedulerStrategyDuration,
        LoopDuration,
        StrategyDuration,
    }
)

var registerMetrics sync.Once

// Register all metrics.
func Register() {
    // Register the metrics.
    registerMetrics.Do(func() {
        RegisterMetrics(metricsList...)
    })
}

// RegisterMetrics registers a list of metrics.
func RegisterMetrics(extraMetrics ...metrics.Registerable) {
    for _, metric := range extraMetrics {
        legacyregistry.MustRegister(metric)
    }
}
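For reference, a minimal sketch of how a caller could consume the non-deprecated metrics defined in the file above: register the package's metric list once, then record an eviction outcome with the declared label set. The import path and the concrete label values shown here are assumptions for illustration and are not part of this diff.

package main

import (
    // Assumed location of the metrics package shown in the diff above.
    "sigs.k8s.io/descheduler/pkg/descheduler/metrics"
)

func main() {
    // Registers the package's metric list with the legacy registry (sync.Once inside).
    metrics.Register()

    // Label order matches the CounterVec definition:
    // result, strategy, profile, namespace, node.
    metrics.PodsEvictedTotal.WithLabelValues(
        "success", "RemoveDuplicates", "default", "kube-system", "node-1",
    ).Inc()
}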
@@ -1,17 +0,0 @@
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api
Some files were not shown because too many files have changed in this diff.