mirror of https://github.com/kubernetes-sigs/descheduler.git synced 2026-01-26 05:14:13 +01:00

Compare commits


2 Commits

Author | SHA1 | Message | Date
Kubernetes Prow Robot | 77d8b3d49a | Merge pull request #746 from damemi/update-go-version-121 ([release-1.21] Bump Go to 1.16.14) | 2022-03-03 02:32:48 -08:00
Mike Dame | cddc15bd3a | Bump Go to 1.16.14 | 2022-02-25 20:51:01 +00:00
2396 changed files with 63780 additions and 215304 deletions

.gitignore (vendored)

@@ -4,4 +4,3 @@ vendordiff.patch
.idea/
*.code-workspace
.vscode/
kind


@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.17.3
FROM golang:1.16.14
WORKDIR /go/src/sigs.k8s.io/descheduler
COPY . .


@@ -24,8 +24,8 @@ ARCHS = amd64 arm arm64
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
GOLANGCI_VERSION := v1.43.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
GOLANGCI_VERSION := v1.30.0
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint)
# REGISTRY is the container registry to push
# into. The default is to push to the staging
@@ -43,7 +43,7 @@ IMAGE_GCLOUD:=$(REGISTRY)/descheduler:$(VERSION)
# In the future binaries can be uploaded to
# GCS bucket gs://k8s-staging-descheduler.
HAS_HELM := $(shell which helm 2> /dev/null)
HAS_HELM := $(shell which helm)
all: build
@@ -127,21 +127,18 @@ gen:
verify-gen:
./hack/verify-conversions.sh
./hack/verify-deep-copies.sh
./hack/verify-defaulters.sh
lint:
ifndef HAS_GOLANGCI
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b ./_output/bin ${GOLANGCI_VERSION}
endif
./_output/bin/golangci-lint run
lint-chart: ensure-helm-install
helm lint ./charts/descheduler
test-helm: ensure-helm-install
./test/run-helm-tests.sh
ensure-helm-install:
lint-chart:
ifndef HAS_HELM
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
endif
endif
helm lint ./charts/descheduler
test-helm:
./test/run-helm-tests.sh

README.md

@@ -1,4 +1,4 @@
[![Go Report Card](https://goreportcard.com/badge/sigs.k8s.io/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
[![Go Report Card](https://goreportcard.com/badge/kubernetes-sigs/descheduler)](https://goreportcard.com/report/sigs.k8s.io/descheduler)
![Release Charts](https://github.com/kubernetes-sigs/descheduler/workflows/Release%20Charts/badge.svg)
# Descheduler for Kubernetes
@@ -42,7 +42,6 @@ Table of Contents
- [RemovePodsViolatingTopologySpreadConstraint](#removepodsviolatingtopologyspreadconstraint)
- [RemovePodsHavingTooManyRestarts](#removepodshavingtoomanyrestarts)
- [PodLifeTime](#podlifetime)
- [RemoveFailedPods](#removefailedpods)
- [Filter Pods](#filter-pods)
- [Namespace filtering](#namespace-filtering)
- [Priority filtering](#priority-filtering)
@@ -103,17 +102,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
Run As A Job
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.21.0' | kubectl apply -f -
```
Run As A CronJob
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.21.0' | kubectl apply -f -
```
Run As A Deployment
```
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.0' | kubectl apply -f -
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.21.0' | kubectl apply -f -
```
## User Guide
@@ -122,28 +121,36 @@ See the [user guide](docs/user-guide.md) in the `/docs` directory.
## Policy and Strategies
Descheduler's policy is configurable and includes strategies that can be enabled or disabled. By default, all strategies are enabled.
Descheduler's policy is configurable and includes strategies that can be enabled or disabled.
Nine strategies
1. `RemoveDuplicates`
2. `LowNodeUtilization`
3. `HighNodeUtilization`
4. `RemovePodsViolatingInterPodAntiAffinity`
5. `RemovePodsViolatingNodeAffinity`
6. `RemovePodsViolatingNodeTaints`
7. `RemovePodsViolatingTopologySpreadConstraint`
8. `RemovePodsHavingTooManyRestarts`
9. `PodLifeTime`
are currently implemented. As part of the policy, the
parameters associated with the strategies can be configured too. By default, all strategies are enabled.
The policy includes a common configuration that applies to all the strategies:
| Name | Default Value | Description |
|------|---------------|-------------|
| `nodeSelector` | `nil` | limiting the nodes which are processed |
| `evictLocalStoragePods` | `false` | allows eviction of pods with local storage |
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.
As part of the policy, the parameters associated with each strategy can be configured.
See each strategy for details on available parameters.
![Strategies diagram](strategies_diagram.png)
**Policy:**
The policy also includes common configuration for all the strategies:
- `nodeSelector` - limiting the nodes which are processed
- `evictLocalStoragePods` - allows eviction of pods with local storage
- `evictSystemCriticalPods` - [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns
- `ignorePvcPods` - set whether PVC pods should be evicted or ignored (defaults to `false`)
- `maxNoOfPodsToEvictPerNode` - maximum number of pods evicted from each node (summed through all strategies)
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
nodeSelector: prod=dev
evictFailedBarePods: false
evictLocalStoragePods: true
evictSystemCriticalPods: true
maxNoOfPodsToEvictPerNode: 40
@@ -152,11 +159,6 @@ strategies:
...
```
The following diagram provides a visualization of most of the strategies to help
categorize how strategies fit together.
![Strategies diagram](strategies_diagram.png)
### RemoveDuplicates
This strategy makes sure that there is only one pod associated with a ReplicaSet (RS),
@@ -265,11 +267,9 @@ under utilized frequently or for a short period of time. By default, `numberOfNo
### HighNodeUtilization
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
trigger down scaling of under utilized nodes.
This strategy **must** be used with the scheduler strategy `MostRequestedPriority`. The parameters of this strategy are
configured under `nodeResourceUtilizationThresholds`.
This strategy finds nodes that are under utilized and evicts pods in the hope that these pods will be scheduled compactly into fewer nodes.
This strategy **must** be used with the
scheduler strategy `MostRequestedPriority`. The parameters of this strategy are configured under `nodeResourceUtilizationThresholds`.
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
`thresholds` can be configured for cpu, memory, number of pods, and extended resources in terms of percentage. The percentage is
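For illustration only, a minimal policy sketch for this strategy, assuming the `nodeResourceUtilizationThresholds` layout described above (the threshold values are placeholders):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "HighNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        # nodes whose usage is below all of these values are treated as
        # under utilized and their pods become eviction candidates
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
```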
@@ -458,9 +458,9 @@ strategies:
This strategy makes sure that pods having too many restarts are removed from nodes. For example a pod with EBS/PD that
can't get the volume/disk attached to the instance, then the pod should be re-scheduled to other nodes. Its parameters
include `podRestartThreshold`, which is the number of restarts (summed over all eligible containers) at which a pod
should be evicted, and `includingInitContainers`, which determines whether init container restarts should be factored
into that calculation.
include `podRestartThreshold`, which is the number of restarts at which a pod should be evicted, and `includingInitContainers`,
which determines whether init container restarts should be factored into that calculation.
|`labelSelector`|(see [label filtering](#label-filtering))|
**Parameters:**
@@ -471,7 +471,6 @@ into that calculation.
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
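A minimal sketch consistent with the parameters above (the threshold value is illustrative; the same parameter block appears in the policy examples elsewhere in this diff):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsHavingTooManyRestarts":
    enabled: true
    params:
      podsHavingTooManyRestarts:
        # evict once the summed restart count reaches this value
        podRestartThreshold: 100
        # also count init container restarts
        includingInitContainers: true
```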
@@ -521,47 +520,6 @@ strategies:
- "Pending"
```
### RemoveFailedPods
This strategy evicts pods that are in failed status phase.
You can provide an optional parameter to filter by failed `reasons`.
`reasons` can be expanded to include reasons of InitContainers as well by setting the optional parameter `includingInitContainers` to `true`.
You can specify an optional parameter `minPodLifetimeSeconds` to evict pods that are older than specified seconds.
Lastly, you can specify the optional parameter `excludeOwnerKinds` and if a pod
has any of these `Kind`s listed as an `OwnerRef`, that pod will not be considered for eviction.
**Parameters:**
|Name|Type|
|---|---|
|`minPodLifetimeSeconds`|uint|
|`excludeOwnerKinds`|list(string)|
|`reasons`|list(string)|
|`includingInitContainers`|bool|
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|`labelSelector`|(see [label filtering](#label-filtering))|
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
**Example:**
```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "NodeAffinity"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600
```
## Filter Pods
### Namespace filtering
@@ -574,7 +532,6 @@ The following strategies accept a `namespaces` parameter which allows to specify
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemoveDuplicates`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
For example:
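A hedged sketch of the `namespaces` block attached to one of the strategies listed above (namespace names and the lifetime value are placeholders):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      namespaces:
        # only pods in these namespaces are considered;
        # use `exclude` instead to invert the filter
        include:
        - "namespace1"
        - "namespace2"
```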
@@ -657,7 +614,7 @@ does not exist, descheduler won't create it and will throw an error.
### Label filtering
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta)
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#labelselector-v1-meta)
to filter pods by their labels:
* `PodLifeTime`
@@ -666,7 +623,6 @@ to filter pods by their labels:
* `RemovePodsViolatingNodeAffinity`
* `RemovePodsViolatingInterPodAntiAffinity`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemoveFailedPods`
This allows running strategies among pods the descheduler is interested in.
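For illustration, a sketch using a standard Kubernetes `labelSelector` (the labels and lifetime value are placeholders):

```yaml
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 86400
      labelSelector:
        # only pods matching both clauses are considered by the strategy
        matchLabels:
          component: redis
        matchExpressions:
        - {key: tier, operator: In, values: [cache]}
```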
@@ -701,7 +657,6 @@ The following strategies accept a `nodeFit` boolean parameter which can optimize
* `RemovePodsViolatingNodeTaints`
* `RemovePodsViolatingTopologySpreadConstraint`
* `RemovePodsHavingTooManyRestarts`
* `RemoveFailedPods`
If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
- A `nodeSelector` on the pod
@@ -742,8 +697,8 @@ Using Deployments instead of ReplicationControllers provides an automated rollou
When the descheduler decides to evict pods from a node, it employs the following general mechanism:
* [Critical pods](https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/) (with priorityClassName set to system-cluster-critical or system-node-critical) are never evicted (unless `evictSystemCriticalPods: true` is set).
* Pods (static or mirrored pods or standalone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated. (Standalone pods in failed status phase can be evicted by setting `evictFailedBarePods: true`)
* Pods (static or mirrored pods or stand alone pods) not part of an ReplicationController, ReplicaSet(Deployment), StatefulSet, or Job are
never evicted because these pods won't be recreated.
* Pods associated with DaemonSets are never evicted.
* Pods with local storage are never evicted (unless `evictLocalStoragePods: true` is set).
* Pods with PVCs are evicted (unless `ignorePvcPods: true` is set).
@@ -752,7 +707,6 @@ best effort pods are evicted before burstable and guaranteed pods.
* All types of pods with the annotation `descheduler.alpha.kubernetes.io/evict` are eligible for eviction. This
annotation is used to override checks which prevent eviction and users can select which pod is evicted.
Users should know how and if the pod will be recreated.
* Pods with a non-nil DeletionTimestamp are not evicted by default.
Setting `--v=4` or greater on the Descheduler will log all reasons why any pod is not evictable.
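For example, a pod opting in to eviction through that annotation could be declared as follows (the pod name and image are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod            # placeholder name
  annotations:
    # overrides the checks that would otherwise prevent eviction of this pod
    descheduler.alpha.kubernetes.io/evict: "true"
spec:
  containers:
  - name: app
    image: k8s.gcr.io/pause:3.5   # placeholder image
```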
@@ -782,7 +736,6 @@ packages that it is compiled with.
Descheduler | Supported Kubernetes Version
-------------|-----------------------------
v0.22 | v1.22
v0.21 | v1.21
v0.20 | v1.20
v0.19 | v1.19


@@ -1,7 +1,7 @@
apiVersion: v1
name: descheduler
version: 0.23.0
appVersion: 0.23.0
version: 0.21.0
appVersion: 0.21.0
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
keywords:
- kubernetes


@@ -45,11 +45,9 @@ The following table lists the configurable parameters of the _descheduler_ chart
| Parameter | Description | Default |
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
@@ -57,7 +55,6 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
@@ -67,6 +64,3 @@ The following table lists the configurable parameters of the _descheduler_ chart
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
| `tolerations` | tolerations to run the descheduler cronjob on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
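For orientation, a hypothetical Helm values override exercising a few of the parameters above (all values are examples only, mirroring the defaults shown in the chart's values.yaml further down in this diff):

```yaml
kind: CronJob
image:
  repository: k8s.gcr.io/descheduler/descheduler
  pullPolicy: IfNotPresent
schedule: "*/2 * * * *"
cmdOptions:
  v: 3
deschedulerPolicy:
  strategies:
    "RemoveDuplicates":
      enabled: true
```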


@@ -1 +1 @@
Descheduler installed as a {{ .Values.kind }} .
Descheduler installed as a cron job.


@@ -42,17 +42,6 @@ app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*


@@ -1,4 +1,3 @@
{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
@@ -7,9 +6,6 @@ metadata:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
suspend: {{ .Values.suspend }}
{{- end }}
concurrencyPolicy: "Forbid"
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
@@ -31,7 +27,8 @@ spec:
{{- .Values.podAnnotations | toYaml | nindent 12 }}
{{- end }}
labels:
{{- include "descheduler.selectorLabels" . | nindent 12 }}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 12 }}
{{- end }}
@@ -40,23 +37,11 @@ spec:
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
@@ -72,8 +57,6 @@ spec:
- {{ $value | quote }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
securityContext:
@@ -91,4 +74,3 @@ spec:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- end }}


@@ -1,81 +0,0 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "descheduler.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- "/bin/descheduler"
args:
- "--policy-config-file"
- "/policy-dir/policy.yaml"
- "--descheduling-interval"
- {{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
- {{ printf "--%s" $key | quote }}
{{- if $value }}
- {{ $value | quote }}
{{- end }}
{{- end }}
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}


@@ -5,7 +5,4 @@ metadata:
name: {{ template "descheduler.serviceAccountName" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end -}}


@@ -2,17 +2,12 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# CronJob or Deployment
kind: CronJob
image:
repository: k8s.gcr.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
tag: ""
pullPolicy: IfNotPresent
imagePullSecrets: []
resources:
requests:
cpu: 500m
@@ -24,19 +19,12 @@ resources:
nameOverride: ""
fullnameOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1" # Use "batch/v1beta1" for k8s version < 1.21.0. TODO(@7i) remove with 1.23 release
schedule: "*/2 * * * *"
suspend: false
#startingDeadlineSeconds: 200
#successfulJobsHistoryLimit: 1
#failedJobsHistoryLimit: 1
# Required when running as a Deployment
deschedulingInterval: 5m
cmdOptions:
v: 3
# evict-local-storage-pods:
@@ -74,23 +62,6 @@ priorityClassName: system-cluster-critical
nodeSelector: {}
# foo: bar
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
tolerations: []
# - key: 'management'
# operator: 'Equal'
# value: 'tool'
# effect: 'NoSchedule'
rbac:
# Specifies whether RBAC resources should be created
create: true
@@ -105,15 +76,3 @@ serviceAccount:
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10


@@ -7,7 +7,7 @@ timeout: 1200s
options:
substitution_option: ALLOW_LOOSE
steps:
- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
- name: 'gcr.io/k8s-testimages/gcb-docker-gcloud:v20190906-745fed4'
entrypoint: make
env:
- DOCKER_CLI_EXPERIMENTAL=enabled


@@ -20,8 +20,10 @@ package options
import (
"github.com/spf13/pflag"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/logs"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
@@ -37,6 +39,7 @@ type DeschedulerServer struct {
componentconfig.DeschedulerConfiguration
Client clientset.Interface
Logs *logs.Options
SecureServing *apiserveroptions.SecureServingOptionsWithLoopback
DisableMetrics bool
}
@@ -53,10 +56,18 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
return &DeschedulerServer{
DeschedulerConfiguration: *cfg,
Logs: logs.NewOptions(),
SecureServing: secureServing,
}, nil
}
// Validation checks for DeschedulerServer.
func (s *DeschedulerServer) Validate() error {
var errs []error
errs = append(errs, s.Logs.Validate()...)
return utilerrors.NewAggregate(errs)
}
func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
versionedCfg := v1alpha1.DeschedulerConfiguration{}
deschedulerscheme.Scheme.Default(&versionedCfg)
@@ -74,6 +85,12 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&rs.KubeconfigFile, "kubeconfig", rs.KubeconfigFile, "File with kube configuration.")
fs.StringVar(&rs.PolicyConfigFile, "policy-config-file", rs.PolicyConfigFile, "File with descheduler policy configuration.")
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
// node-selector query causes descheduler to run only on nodes that matches the node labels in the query
fs.StringVar(&rs.NodeSelector, "node-selector", rs.NodeSelector, "DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
// max-no-pods-to-evict limits the maximum number of pods to be evicted per node by descheduler.
fs.IntVar(&rs.MaxNoOfPodsToEvictPerNode, "max-pods-to-evict-per-node", rs.MaxNoOfPodsToEvictPerNode, "DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler")
// evict-local-storage-pods allows eviction of pods that are using local storage. This is false by default.
fs.BoolVar(&rs.EvictLocalStoragePods, "evict-local-storage-pods", rs.EvictLocalStoragePods, "DEPRECATED: enables evicting pods using local storage by descheduler")
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
rs.SecureServing.AddFlags(fs)


@@ -19,11 +19,8 @@ package app
import (
"context"
"flag"
"io"
"os/signal"
"syscall"
"k8s.io/apiserver/pkg/server/healthz"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/pkg/descheduler"
@@ -33,9 +30,7 @@ import (
apiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/config"
_ "k8s.io/component-base/logs/json/register"
"k8s.io/component-base/logs/registry"
aflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
)
@@ -53,7 +48,8 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
Short: "descheduler",
Long: `The descheduler evicts pods which may be bound to less desired nodes`,
Run: func(cmd *cobra.Command, args []string) {
// s.Logs.Config.Format = s.Logging.Format
s.Logs.LogFormat = s.Logging.Format
s.Logs.Apply()
// LoopbackClientConfig is a config for a privileged loopback connection
var LoopbackClientConfig *restclient.Config
@@ -63,30 +59,23 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
return
}
factory, _ := registry.LogRegistry.Get(s.Logging.Format)
if factory == nil {
klog.ClearLogger()
} else {
log, logrFlush := factory.Create(config.FormatOptions{})
defer logrFlush()
klog.SetLogger(log)
}
ctx, done := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer done()
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
if !s.DisableMetrics {
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
}
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
klog.Fatalf("failed to start secure server: %v", err)
if err := s.Validate(); err != nil {
klog.ErrorS(err, "failed to validate server configuration")
return
}
err := Run(ctx, s)
if !s.DisableMetrics {
ctx := context.TODO()
pathRecorderMux := mux.NewPathRecorderMux("descheduler")
pathRecorderMux.Handle("/metrics", legacyregistry.HandlerWithReset())
if _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done()); err != nil {
klog.Fatalf("failed to start secure server: %v", err)
return
}
}
err := Run(s)
if err != nil {
klog.ErrorS(err, "descheduler server")
}
@@ -94,10 +83,12 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
}
cmd.SetOut(out)
flags := cmd.Flags()
flags.SetNormalizeFunc(aflag.WordSepNormalizeFunc)
flags.AddGoFlagSet(flag.CommandLine)
s.AddFlags(flags)
return cmd
}
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return descheduler.Run(ctx, rs)
func Run(rs *options.DeschedulerServer) error {
return descheduler.Run(rs)
}


@@ -17,23 +17,22 @@ limitations under the License.
package main
import (
"fmt"
"k8s.io/component-base/logs"
"os"
"k8s.io/component-base/cli"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/cmd/descheduler/app"
)
func init() {
klog.SetOutput(os.Stdout)
klog.InitFlags(nil)
}
func main() {
out := os.Stdout
cmd := app.NewDeschedulerCommand(out)
cmd.AddCommand(app.NewVersionCommand())
code := cli.Run(cmd)
os.Exit(code)
logs.InitLogs()
defer logs.FlushLogs()
if err := cmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}


@@ -9,7 +9,7 @@
3. Push the release branch to the descheduler repo and ensure branch protection is enabled (not required for patch releases)
4. Tag the repository from the `master` branch (from the `release-1.18` branch for a patch release) and push the tag `VERSION=v0.18.0 git tag -m $VERSION $VERSION; git push origin $VERSION`
5. Publish a draft release using the tag you just created
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
6. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
7. Publish release
8. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release
@@ -22,7 +22,7 @@
5. Checkout the tag you just created and make sure your repo is clean by git's standards `git checkout $VERSION`
6. Build and push the container image to the staging registry `VERSION=$VERSION make push-all`
7. Publish a draft release using the tag you just created
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/main/k8s.gcr.io#image-promoter)
8. Perform the [image promotion process](https://github.com/kubernetes/k8s.io/tree/master/k8s.gcr.io#image-promoter)
9. Publish release
10. Email `kubernetes-sig-scheduling@googlegroups.com` to announce the release


@@ -4,7 +4,6 @@ Starting with descheduler release v0.10.0 container images are available in the
Descheduler Version | Container Image | Architectures |
------------------- |-----------------------------------------------------|-------------------------|
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
v0.20.0 | k8s.gcr.io/descheduler/descheduler:v0.20.0 | AMD64<br>ARM64 |
v0.19.0 | k8s.gcr.io/descheduler/descheduler:v0.19.0 | AMD64 |


@@ -1,14 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemoveFailedPods":
enabled: true
params:
failedPods:
reasons:
- "OutOfcpu"
- "CreateContainerConfigError"
includingInitContainers: true
excludeOwnerKinds:
- "Job"
minPodLifetimeSeconds: 3600 # 1 hour


@@ -1,3 +1,4 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:


@@ -1,3 +1,4 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:


@@ -1,3 +1,4 @@
---
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:


@@ -23,7 +23,3 @@ strategies:
podsHavingTooManyRestarts:
podRestartThreshold: 100
includingInitContainers: true
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true


@@ -1,7 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
"RemovePodsViolatingTopologySpreadConstraint":
enabled: true
params:
includeSoftConstraints: true # Include 'ScheduleAnyways' constraints

go.mod

@@ -1,117 +1,19 @@
module sigs.k8s.io/descheduler
go 1.17
go 1.16
require (
github.com/client9/misspell v0.3.4
github.com/spf13/cobra v1.2.1
github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
k8s.io/api v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/apiserver v0.23.0
k8s.io/client-go v0.23.0
k8s.io/code-generator v0.23.0
k8s.io/component-base v0.23.0
k8s.io/component-helpers v0.23.0
k8s.io/klog/v2 v2.30.0
k8s.io/api v0.21.0
k8s.io/apimachinery v0.21.0
k8s.io/apiserver v0.21.0
k8s.io/client-go v0.21.0
k8s.io/code-generator v0.21.0
k8s.io/component-base v0.21.0
k8s.io/component-helpers v0.21.0
k8s.io/klog/v2 v2.8.0
k8s.io/kubectl v0.20.5
sigs.k8s.io/mdtoc v1.0.1
)
require (
cloud.google.com/go v0.81.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mmarkdown/mmark v2.0.40+incompatible // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/v3 v3.5.0 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)

go.sum

File diff suppressed because it is too large.


@@ -18,15 +18,15 @@ E2E_GCE_HOME=$DESCHEDULER_ROOT/hack/e2e-gce
create_cluster() {
echo "#################### Creating instances ##########################"
gcloud compute instances create descheduler-$master_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
gcloud compute instances create descheduler-$master_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
# Keeping the --zone here so as to make sure that e2e's can run locally.
echo "gcloud compute instances delete descheduler-$master_uuid --zone=us-east1-b --quiet" > $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node1_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
gcloud compute instances create descheduler-$node1_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node1_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image-family="ubuntu-1804-lts" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-c --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
gcloud compute instances create descheduler-$node2_uuid --image="ubuntu-1604-xenial-v20180306" --image-project="ubuntu-os-cloud" --zone=us-east1-b
echo "gcloud compute instances delete descheduler-$node2_uuid --zone=us-east1-b --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
# Delete the firewall port created for master.
echo "gcloud compute firewall-rules delete kubeapiserver-$master_uuid --quiet" >> $E2E_GCE_HOME/delete_cluster.sh
@@ -44,10 +44,10 @@ generate_kubeadm_instance_files() {
transfer_install_files() {
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_install.sh descheduler-$master_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_preinstall.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
}
@@ -55,19 +55,19 @@ install_kube() {
# Docker installation.
gcloud compute ssh descheduler-$master_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-c
gcloud compute ssh descheduler-$node2_uuid --command "sudo apt-get update; sudo apt-get install -y docker.io" --zone=us-east1-b
# kubeadm installation.
# 1. Transfer files to master, nodes.
transfer_install_files
# 2. Install kubeadm.
#TODO: Add rm /tmp/kubeadm_install.sh
# Open port for kube API server
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute firewall-rules create kubeapiserver-$master_uuid --allow tcp:6443 --source-tags=descheduler-$master_uuid --source-ranges=0.0.0.0/0 --description="Opening api server port"
gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
kubeadm_join_command=$(gcloud compute ssh descheduler-$master_uuid --command "sudo chmod 755 /tmp/kubeadm_install.sh; sudo /tmp/kubeadm_install.sh" --zone=us-east1-b|grep 'kubeadm join')
# Copy the kubeconfig file onto /tmp for e2e tests.
# Copy the kubeconfig file onto /tmp for e2e tests.
gcloud compute ssh descheduler-$master_uuid --command "sudo cp /etc/kubernetes/admin.conf /tmp; sudo chmod 777 /tmp/admin.conf" --zone=us-east1-b
gcloud compute scp descheduler-$master_uuid:/tmp/admin.conf /tmp/admin.conf --zone=us-east1-b
@@ -75,15 +75,16 @@ install_kube() {
gcloud compute ssh descheduler-$master_uuid --command "sudo kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml --kubeconfig /etc/kubernetes/admin.conf" --zone=us-east1-b
echo $kubeadm_join_command > $E2E_GCE_HOME/kubeadm_join.sh
# Copy kubeadm_join to every node.
# Copy kubeadm_join to every node.
#TODO: Put these in a loop, so that extension becomes possible.
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node1_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node1_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-c
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-c
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-c
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_preinstall.sh; sudo /tmp/kubeadm_preinstall.sh" --zone=us-east1-b
gcloud compute scp $E2E_GCE_HOME/kubeadm_join.sh descheduler-$node2_uuid:/tmp --zone=us-east1-b
gcloud compute ssh descheduler-$node2_uuid --command "sudo chmod 755 /tmp/kubeadm_join.sh; sudo /tmp/kubeadm_join.sh" --zone=us-east1-b
}


@@ -3,16 +3,4 @@ apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-a"
- role: worker
kubeadmConfigPatches:
- |
kind: JoinConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "topology.kubernetes.io/zone=local-b"


@@ -1,4 +1,3 @@
//go:build tools
// +build tools
/*


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi


@@ -28,7 +28,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated output differs:" >&2
echo "${_out}" >&2
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh"
echo "Generated conversions verify failed. Please run ./hack/update-generated-conversions.sh (and commit the result)"
exit 1
fi
popd > /dev/null 2>&1


@@ -28,7 +28,7 @@ pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated deep-copies output differs:" >&2
echo "${_out}" >&2
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh"
echo "Generated deep-copies verify failed. Please run ./hack/update-generated-deep-copies.sh (and commit the result)"
exit 1
fi
popd > /dev/null 2>&1


@@ -1,35 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
_tmpdir="$(mktemp -d "${DESCHEDULER_ROOT}/_tmp/kube-verify.XXXXXX")"
_deschedulertmp="${_tmpdir}"
mkdir -p "${_deschedulertmp}"
git archive --format=tar --prefix=descheduler/ "$(git write-tree)" | (cd "${_deschedulertmp}" && tar xf -)
_deschedulertmp="${_deschedulertmp}/descheduler"
pushd "${_deschedulertmp}" > /dev/null 2>&1
go build -o "${OS_OUTPUT_BINPATH}/defaulter-gen" "k8s.io/code-generator/cmd/defaulter-gen"
${OS_OUTPUT_BINPATH}/defaulter-gen \
--go-header-file "hack/boilerplate/boilerplate.go.txt" \
--input-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--extra-peer-dirs "${PRJ_PREFIX}/pkg/apis/componentconfig/v1alpha1,${PRJ_PREFIX}/pkg/api/v1alpha1" \
--output-file-base zz_generated.defaults
popd > /dev/null 2>&1
pushd "${DESCHEDULER_ROOT}" > /dev/null 2>&1
if ! _out="$(diff -Naupr pkg/ "${_deschedulertmp}/pkg/")"; then
echo "Generated defaulters output differs:" >&2
echo "${_out}" >&2
echo "Generated defaulters verify failed. Please run ./hack/update-generated-defaulters.sh"
fi
popd > /dev/null 2>&1
echo "Generated Defaulters verified."


@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..
GO_VERSION=($(go version))
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16') ]]; then
echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
exit 1
fi

kind (executable file; binary not shown)


@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
image: k8s.gcr.io/descheduler/descheduler:v0.21.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
@@ -31,14 +31,6 @@ spec:
requests:
cpu: "500m"
memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:


@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
image: k8s.gcr.io/descheduler/descheduler:v0.21.0
imagePullPolicy: IfNotPresent
command:
- "/bin/descheduler"
@@ -33,14 +33,6 @@ spec:
ports:
- containerPort: 10258
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
resources:
requests:
cpu: 500m


@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
image: k8s.gcr.io/descheduler/descheduler:v0.22.0
image: k8s.gcr.io/descheduler/descheduler:v0.21.0
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
@@ -29,14 +29,6 @@ spec:
requests:
cpu: "500m"
memory: "256Mi"
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
securityContext:
allowPrivilegeEscalation: false
capabilities:


@@ -34,9 +34,9 @@ var (
&metrics.CounterOpts{
Subsystem: DeschedulerSubsystem,
Name: "pods_evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace, by the node name. 'error' result means a pod could not be evicted",
Help: "Number of evicted pods, by the result, by the strategy, by the namespace. 'failed' result means a pod could not be evicted",
StabilityLevel: metrics.ALPHA,
}, []string{"result", "strategy", "namespace", "node"})
}, []string{"result", "strategy", "namespace"})
buildInfo = metrics.NewGauge(
&metrics.GaugeOpts{


@@ -32,9 +32,6 @@ type DeschedulerPolicy struct {
// NodeSelector for a set of nodes to operate over
NodeSelector *string
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool
@@ -45,10 +42,7 @@ type DeschedulerPolicy struct {
IgnorePVCPods *bool
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *uint
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *uint
MaxNoOfPodsToEvictPerNode *int
}
type StrategyName string
@@ -81,7 +75,6 @@ type StrategyParameters struct {
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts
PodLifeTime *PodLifeTime
RemoveDuplicates *RemoveDuplicates
FailedPods *FailedPods
IncludeSoftConstraints bool
Namespaces *Namespaces
ThresholdPriority *int32
@@ -112,10 +105,3 @@ type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint
PodStatusPhases []string
}
type FailedPods struct {
ExcludeOwnerKinds []string
MinPodLifetimeSeconds *uint
Reasons []string
IncludingInitContainers bool
}


@@ -32,9 +32,6 @@ type DeschedulerPolicy struct {
// NodeSelector for a set of nodes to operate over
NodeSelector *string `json:"nodeSelector,omitempty"`
// EvictFailedBarePods allows pods without ownerReferences and in failed phase to be evicted.
EvictFailedBarePods *bool `json:"evictFailedBarePods,omitempty"`
// EvictLocalStoragePods allows pods using local storage to be evicted.
EvictLocalStoragePods *bool `json:"evictLocalStoragePods,omitempty"`
@@ -46,9 +43,6 @@ type DeschedulerPolicy struct {
// MaxNoOfPodsToEvictPerNode restricts maximum of pods to be evicted per node.
MaxNoOfPodsToEvictPerNode *int `json:"maxNoOfPodsToEvictPerNode,omitempty"`
// MaxNoOfPodsToEvictPerNamespace restricts maximum of pods to be evicted per namespace.
MaxNoOfPodsToEvictPerNamespace *int `json:"maxNoOfPodsToEvictPerNamespace,omitempty"`
}
type StrategyName string
@@ -79,7 +73,6 @@ type StrategyParameters struct {
PodsHavingTooManyRestarts *PodsHavingTooManyRestarts `json:"podsHavingTooManyRestarts,omitempty"`
PodLifeTime *PodLifeTime `json:"podLifeTime,omitempty"`
RemoveDuplicates *RemoveDuplicates `json:"removeDuplicates,omitempty"`
FailedPods *FailedPods `json:"failedPods,omitempty"`
IncludeSoftConstraints bool `json:"includeSoftConstraints"`
Namespaces *Namespaces `json:"namespaces"`
ThresholdPriority *int32 `json:"thresholdPriority"`
@@ -110,10 +103,3 @@ type PodLifeTime struct {
MaxPodLifeTimeSeconds *uint `json:"maxPodLifeTimeSeconds,omitempty"`
PodStatusPhases []string `json:"podStatusPhases,omitempty"`
}
type FailedPods struct {
ExcludeOwnerKinds []string `json:"excludeOwnerKinds,omitempty"`
MinPodLifetimeSeconds *uint `json:"minPodLifetimeSeconds,omitempty"`
Reasons []string `json:"reasons,omitempty"`
IncludingInitContainers bool `json:"includingInitContainers,omitempty"`
}


@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@@ -57,16 +56,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*FailedPods)(nil), (*api.FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_FailedPods_To_api_FailedPods(a.(*FailedPods), b.(*api.FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*api.FailedPods)(nil), (*FailedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_FailedPods_To_v1alpha1_FailedPods(a.(*api.FailedPods), b.(*FailedPods), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Namespaces)(nil), (*api.Namespaces)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_Namespaces_To_api_Namespaces(a.(*Namespaces), b.(*api.Namespaces), scope)
}); err != nil {
@@ -133,24 +122,10 @@ func RegisterConversions(s *runtime.Scheme) error {
func autoConvert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *DeschedulerPolicy, out *api.DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*api.StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
**out = uint(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
return nil
}
@@ -162,24 +137,10 @@ func Convert_v1alpha1_DeschedulerPolicy_To_api_DeschedulerPolicy(in *Descheduler
func autoConvert_api_DeschedulerPolicy_To_v1alpha1_DeschedulerPolicy(in *api.DeschedulerPolicy, out *DeschedulerPolicy, s conversion.Scope) error {
out.Strategies = *(*StrategyList)(unsafe.Pointer(&in.Strategies))
out.NodeSelector = (*string)(unsafe.Pointer(in.NodeSelector))
out.EvictFailedBarePods = (*bool)(unsafe.Pointer(in.EvictFailedBarePods))
out.EvictLocalStoragePods = (*bool)(unsafe.Pointer(in.EvictLocalStoragePods))
out.EvictSystemCriticalPods = (*bool)(unsafe.Pointer(in.EvictSystemCriticalPods))
out.IgnorePVCPods = (*bool)(unsafe.Pointer(in.IgnorePVCPods))
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNode = nil
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = int(**in)
} else {
out.MaxNoOfPodsToEvictPerNamespace = nil
}
out.MaxNoOfPodsToEvictPerNode = (*int)(unsafe.Pointer(in.MaxNoOfPodsToEvictPerNode))
return nil
}
@@ -212,32 +173,6 @@ func Convert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in *api.Des
return autoConvert_api_DeschedulerStrategy_To_v1alpha1_DeschedulerStrategy(in, out, s)
}
func autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_v1alpha1_FailedPods_To_api_FailedPods is an autogenerated conversion function.
func Convert_v1alpha1_FailedPods_To_api_FailedPods(in *FailedPods, out *api.FailedPods, s conversion.Scope) error {
return autoConvert_v1alpha1_FailedPods_To_api_FailedPods(in, out, s)
}
func autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
out.ExcludeOwnerKinds = *(*[]string)(unsafe.Pointer(&in.ExcludeOwnerKinds))
out.MinPodLifetimeSeconds = (*uint)(unsafe.Pointer(in.MinPodLifetimeSeconds))
out.Reasons = *(*[]string)(unsafe.Pointer(&in.Reasons))
out.IncludingInitContainers = in.IncludingInitContainers
return nil
}
// Convert_api_FailedPods_To_v1alpha1_FailedPods is an autogenerated conversion function.
func Convert_api_FailedPods_To_v1alpha1_FailedPods(in *api.FailedPods, out *FailedPods, s conversion.Scope) error {
return autoConvert_api_FailedPods_To_v1alpha1_FailedPods(in, out, s)
}
func autoConvert_v1alpha1_Namespaces_To_api_Namespaces(in *Namespaces, out *api.Namespaces, s conversion.Scope) error {
out.Include = *(*[]string)(unsafe.Pointer(&in.Include))
out.Exclude = *(*[]string)(unsafe.Pointer(&in.Exclude))
@@ -354,7 +289,6 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
out.PodsHavingTooManyRestarts = (*api.PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*api.PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*api.RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*api.FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*api.Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))
@@ -375,7 +309,6 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
out.PodsHavingTooManyRestarts = (*PodsHavingTooManyRestarts)(unsafe.Pointer(in.PodsHavingTooManyRestarts))
out.PodLifeTime = (*PodLifeTime)(unsafe.Pointer(in.PodLifeTime))
out.RemoveDuplicates = (*RemoveDuplicates)(unsafe.Pointer(in.RemoveDuplicates))
out.FailedPods = (*FailedPods)(unsafe.Pointer(in.FailedPods))
out.IncludeSoftConstraints = in.IncludeSoftConstraints
out.Namespaces = (*Namespaces)(unsafe.Pointer(in.Namespaces))
out.ThresholdPriority = (*int32)(unsafe.Pointer(in.ThresholdPriority))

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@@ -42,11 +41,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
@@ -67,11 +61,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(int)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(int)
**out = **in
}
return
}
@@ -114,37 +103,6 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -336,11 +294,6 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@@ -42,11 +41,6 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
*out = new(string)
**out = **in
}
if in.EvictFailedBarePods != nil {
in, out := &in.EvictFailedBarePods, &out.EvictFailedBarePods
*out = new(bool)
**out = **in
}
if in.EvictLocalStoragePods != nil {
in, out := &in.EvictLocalStoragePods, &out.EvictLocalStoragePods
*out = new(bool)
@@ -64,12 +58,7 @@ func (in *DeschedulerPolicy) DeepCopyInto(out *DeschedulerPolicy) {
}
if in.MaxNoOfPodsToEvictPerNode != nil {
in, out := &in.MaxNoOfPodsToEvictPerNode, &out.MaxNoOfPodsToEvictPerNode
*out = new(uint)
**out = **in
}
if in.MaxNoOfPodsToEvictPerNamespace != nil {
in, out := &in.MaxNoOfPodsToEvictPerNamespace, &out.MaxNoOfPodsToEvictPerNamespace
*out = new(uint)
*out = new(int)
**out = **in
}
return
@@ -114,37 +103,6 @@ func (in *DeschedulerStrategy) DeepCopy() *DeschedulerStrategy {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedPods) DeepCopyInto(out *FailedPods) {
*out = *in
if in.ExcludeOwnerKinds != nil {
in, out := &in.ExcludeOwnerKinds, &out.ExcludeOwnerKinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.MinPodLifetimeSeconds != nil {
in, out := &in.MinPodLifetimeSeconds, &out.MinPodLifetimeSeconds
*out = new(uint)
**out = **in
}
if in.Reasons != nil {
in, out := &in.Reasons, &out.Reasons
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedPods.
func (in *FailedPods) DeepCopy() *FailedPods {
if in == nil {
return nil
}
out := new(FailedPods)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespaces) DeepCopyInto(out *Namespaces) {
*out = *in
@@ -336,11 +294,6 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
*out = new(RemoveDuplicates)
(*in).DeepCopyInto(*out)
}
if in.FailedPods != nil {
in, out := &in.FailedPods, &out.FailedPods
*out = new(FailedPods)
(*in).DeepCopyInto(*out)
}
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = new(Namespaces)

View File

@@ -17,9 +17,8 @@ limitations under the License.
package v1alpha1
import (
"time"
componentbaseconfig "k8s.io/component-base/config"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@@ -29,7 +28,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Logging.DeepCopyInto(&out.Logging)
out.Logging = in.Logging
return
}

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*

View File

@@ -1,4 +1,3 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
@@ -29,7 +28,7 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Logging.DeepCopyInto(&out.Logging)
out.Logging = in.Logging
return
}

View File

@@ -19,22 +19,14 @@ package descheduler
import (
"context"
"fmt"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
corev1informers "k8s.io/client-go/informers/core/v1"
schedulingv1informers "k8s.io/client-go/informers/scheduling/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
"sigs.k8s.io/descheduler/metrics"
"sigs.k8s.io/descheduler/pkg/api"
@@ -42,14 +34,13 @@ import (
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
)
func Run(ctx context.Context, rs *options.DeschedulerServer) error {
func Run(rs *options.DeschedulerServer) error {
metrics.Register()
ctx := context.Background()
rsclient, err := client.CreateClient(rs.KubeconfigFile)
if err != nil {
return err
@@ -69,108 +60,15 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
return err
}
// tie in root ctx with our wait stopChannel
stopChannel := make(chan struct{})
go func() {
<-ctx.Done()
close(stopChannel)
}()
return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion, stopChannel)
}
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
func cachedClient(
realClient clientset.Interface,
podInformer corev1informers.PodInformer,
nodeInformer corev1informers.NodeInformer,
namespaceInformer corev1informers.NamespaceInformer,
priorityClassInformer schedulingv1informers.PriorityClassInformer,
) (clientset.Interface, error) {
fakeClient := fakeclientset.NewSimpleClientset()
// simulate a pod eviction by deleting a pod
fakeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
if action.GetSubresource() == "eviction" {
createAct, matched := action.(core.CreateActionImpl)
if !matched {
return false, nil, fmt.Errorf("unable to convert action to core.CreateActionImpl")
}
eviction, matched := createAct.Object.(*policy.Eviction)
if !matched {
return false, nil, fmt.Errorf("unable to convert action object into *policy.Eviction")
}
if err := fakeClient.Tracker().Delete(action.GetResource(), eviction.GetNamespace(), eviction.GetName()); err != nil {
return false, nil, fmt.Errorf("unable to delete pod %v/%v: %v", eviction.GetNamespace(), eviction.GetName(), err)
}
return true, nil, nil
}
// fallback to the default reactor
return false, nil, nil
})
klog.V(3).Infof("Pulling resources for the cached client from the cluster")
pods, err := podInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list pods: %v", err)
}
for _, item := range pods {
if _, err := fakeClient.CoreV1().Pods(item.Namespace).Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy pod: %v", err)
}
}
nodes, err := nodeInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list nodes: %v", err)
}
for _, item := range nodes {
if _, err := fakeClient.CoreV1().Nodes().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
namespaces, err := namespaceInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list namespaces: %v", err)
}
for _, item := range namespaces {
if _, err := fakeClient.CoreV1().Namespaces().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy node: %v", err)
}
}
priorityClasses, err := priorityClassInformer.Lister().List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("unable to list priorityclasses: %v", err)
}
for _, item := range priorityClasses {
if _, err := fakeClient.SchedulingV1().PriorityClasses().Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("unable to copy priorityclass: %v", err)
}
}
return fakeClient, nil
}
type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor)
func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer, deschedulerPolicy *api.DeschedulerPolicy, evictionPolicyGroupVersion string, stopChannel chan struct{}) error {
sharedInformerFactory := informers.NewSharedInformerFactory(rs.Client, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
namespaceInformer := sharedInformerFactory.Core().V1().Namespaces()
priorityClassInformer := sharedInformerFactory.Scheduling().V1().PriorityClasses()
// create the informers
namespaceInformer.Informer()
priorityClassInformer.Informer()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
return fmt.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
@@ -185,27 +83,18 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
"RemovePodsHavingTooManyRestarts": strategies.RemovePodsHavingTooManyRestarts,
"PodLifeTime": strategies.PodLifeTime,
"RemovePodsViolatingTopologySpreadConstraint": strategies.RemovePodsViolatingTopologySpreadConstraint,
"RemoveFailedPods": strategies.RemoveFailedPods,
}
var nodeSelector string
nodeSelector := rs.NodeSelector
if deschedulerPolicy.NodeSelector != nil {
nodeSelector = *deschedulerPolicy.NodeSelector
}
var evictLocalStoragePods bool
evictLocalStoragePods := rs.EvictLocalStoragePods
if deschedulerPolicy.EvictLocalStoragePods != nil {
evictLocalStoragePods = *deschedulerPolicy.EvictLocalStoragePods
}
evictBarePods := false
if deschedulerPolicy.EvictFailedBarePods != nil {
evictBarePods = *deschedulerPolicy.EvictFailedBarePods
if evictBarePods {
klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
}
}
evictSystemCriticalPods := false
if deschedulerPolicy.EvictSystemCriticalPods != nil {
evictSystemCriticalPods = *deschedulerPolicy.EvictSystemCriticalPods
@@ -219,7 +108,12 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
ignorePvcPods = *deschedulerPolicy.IgnorePVCPods
}
wait.NonSlidingUntil(func() {
maxNoOfPodsToEvictPerNode := rs.MaxNoOfPodsToEvictPerNode
if deschedulerPolicy.MaxNoOfPodsToEvictPerNode != nil {
maxNoOfPodsToEvictPerNode = *deschedulerPolicy.MaxNoOfPodsToEvictPerNode
}
wait.Until(func() {
nodes, err := nodeutil.ReadyNodes(ctx, rs.Client, nodeInformer, nodeSelector)
if err != nil {
klog.V(1).InfoS("Unable to get ready nodes", "err", err)
@@ -233,55 +127,21 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
return
}
var podEvictorClient clientset.Interface
// When dry-run mode is enabled, collect all the relevant objects (mostly pods) under a fake client
// so that evicting pods while running multiple strategies in a row has the same cumulative effect
// as evicting pods for real.
if rs.DryRun {
klog.V(3).Infof("Building a cached client from the cluster for the dry run")
// Create a new cache so we start from scratch without any leftovers
fakeClient, err := cachedClient(rs.Client, podInformer, nodeInformer, namespaceInformer, priorityClassInformer)
if err != nil {
klog.Error(err)
return
}
fakeSharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
getPodsAssignedToNode, err = podutil.BuildGetPodsAssignedToNodeFunc(fakeSharedInformerFactory.Core().V1().Pods())
if err != nil {
klog.Errorf("build get pods assigned to node function error: %v", err)
return
}
fakeCtx, cncl := context.WithCancel(context.TODO())
defer cncl()
fakeSharedInformerFactory.Start(fakeCtx.Done())
fakeSharedInformerFactory.WaitForCacheSync(fakeCtx.Done())
podEvictorClient = fakeClient
} else {
podEvictorClient = rs.Client
}
klog.V(3).Infof("Building a pod evictor")
podEvictor := evictions.NewPodEvictor(
podEvictorClient,
rs.Client,
evictionPolicyGroupVersion,
rs.DryRun,
deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
maxNoOfPodsToEvictPerNode,
nodes,
evictLocalStoragePods,
evictSystemCriticalPods,
ignorePvcPods,
evictBarePods,
!rs.DisableMetrics,
)
for name, strategy := range deschedulerPolicy.Strategies {
if f, ok := strategyFuncs[name]; ok {
if strategy.Enabled {
f(ctx, rs.Client, strategy, nodes, podEvictor, getPodsAssignedToNode)
f(ctx, rs.Client, strategy, nodes, podEvictor)
}
} else {
klog.ErrorS(fmt.Errorf("unknown strategy name"), "skipping strategy", "strategy", name)

View File

@@ -55,7 +55,7 @@ func TestTaintsUpdated(t *testing.T) {
if err != nil {
t.Fatalf("Unable to run descheduler strategies: %v", err)
}
case <-time.After(1 * time.Second):
case <-time.After(300 * time.Millisecond):
// Wait for few cycles and then verify the only pod still exists
}

View File

@@ -45,70 +45,57 @@ const (
)
// nodePodEvictedCount keeps count of pods evicted on node
type nodePodEvictedCount map[*v1.Node]uint
type namespacePodEvictCount map[string]uint
type nodePodEvictedCount map[*v1.Node]int
type PodEvictor struct {
client clientset.Interface
nodes []*v1.Node
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode *uint
maxPodsToEvictPerNamespace *uint
nodepodCount nodePodEvictedCount
namespacePodCount namespacePodEvictCount
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
ignorePvcPods bool
metricsEnabled bool
client clientset.Interface
nodes []*v1.Node
policyGroupVersion string
dryRun bool
maxPodsToEvictPerNode int
nodepodCount nodePodEvictedCount
evictLocalStoragePods bool
evictSystemCriticalPods bool
ignorePvcPods bool
}
func NewPodEvictor(
client clientset.Interface,
policyGroupVersion string,
dryRun bool,
maxPodsToEvictPerNode *uint,
maxPodsToEvictPerNamespace *uint,
maxPodsToEvictPerNode int,
nodes []*v1.Node,
evictLocalStoragePods bool,
evictSystemCriticalPods bool,
ignorePvcPods bool,
evictFailedBarePods bool,
metricsEnabled bool,
) *PodEvictor {
var nodePodCount = make(nodePodEvictedCount)
var namespacePodCount = make(namespacePodEvictCount)
for _, node := range nodes {
// Initialize podsEvicted till now with 0.
nodePodCount[node] = 0
}
return &PodEvictor{
client: client,
nodes: nodes,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
maxPodsToEvictPerNamespace: maxPodsToEvictPerNamespace,
nodepodCount: nodePodCount,
namespacePodCount: namespacePodCount,
evictLocalStoragePods: evictLocalStoragePods,
evictSystemCriticalPods: evictSystemCriticalPods,
evictFailedBarePods: evictFailedBarePods,
ignorePvcPods: ignorePvcPods,
metricsEnabled: metricsEnabled,
client: client,
nodes: nodes,
policyGroupVersion: policyGroupVersion,
dryRun: dryRun,
maxPodsToEvictPerNode: maxPodsToEvictPerNode,
nodepodCount: nodePodCount,
evictLocalStoragePods: evictLocalStoragePods,
evictSystemCriticalPods: evictSystemCriticalPods,
ignorePvcPods: ignorePvcPods,
}
}
// NodeEvicted gives a number of pods evicted for node
func (pe *PodEvictor) NodeEvicted(node *v1.Node) uint {
func (pe *PodEvictor) NodeEvicted(node *v1.Node) int {
return pe.nodepodCount[node]
}
// TotalEvicted gives a number of pods evicted through all nodes
func (pe *PodEvictor) TotalEvicted() uint {
var total uint
func (pe *PodEvictor) TotalEvicted() int {
var total int
for _, count := range pe.nodepodCount {
total += count
}
@@ -123,37 +110,20 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
if len(reasons) > 0 {
reason += " (" + strings.Join(reasons, ", ") + ")"
}
if pe.maxPodsToEvictPerNode != nil && pe.nodepodCount[node]+1 > *pe.maxPodsToEvictPerNode {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per node reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", *pe.maxPodsToEvictPerNode, node.Name)
if pe.maxPodsToEvictPerNode > 0 && pe.nodepodCount[node]+1 > pe.maxPodsToEvictPerNode {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number reached", "strategy": strategy, "namespace": pod.Namespace}).Inc()
return false, fmt.Errorf("Maximum number %v of evicted pods per %q node reached", pe.maxPodsToEvictPerNode, node.Name)
}
if pe.maxPodsToEvictPerNamespace != nil && pe.namespacePodCount[pod.Namespace]+1 > *pe.maxPodsToEvictPerNamespace {
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "maximum number of pods per namespace reached", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
return false, fmt.Errorf("Maximum number %v of evicted pods per %q namespace reached", *pe.maxPodsToEvictPerNamespace, pod.Namespace)
}
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion)
err := evictPod(ctx, pe.client, pod, pe.policyGroupVersion, pe.dryRun)
if err != nil {
// err is used only for logging purposes
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod), "reason", reason)
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
metrics.PodsEvicted.With(map[string]string{"result": "error", "strategy": strategy, "namespace": pod.Namespace}).Inc()
return false, nil
}
pe.nodepodCount[node]++
pe.namespacePodCount[pod.Namespace]++
if pe.metricsEnabled {
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace, "node": node.Name}).Inc()
}
if pe.dryRun {
klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
} else {
@@ -163,11 +133,15 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
r := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "sigs.k8s.io.descheduler"})
r.Event(pod, v1.EventTypeNormal, "Descheduled", fmt.Sprintf("pod evicted by sigs.k8s.io/descheduler%s", reason))
metrics.PodsEvicted.With(map[string]string{"result": "success", "strategy": strategy, "namespace": pod.Namespace}).Inc()
}
return true, nil
}
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string) error {
func evictPod(ctx context.Context, client clientset.Interface, pod *v1.Pod, policyGroupVersion string, dryRun bool) error {
if dryRun {
return nil
}
deleteOptions := &metav1.DeleteOptions{}
// GracePeriodSeconds ?
eviction := &policy.Eviction{
@@ -241,25 +215,6 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
}
ev := &evictable{}
if pe.evictFailedBarePods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Enable evictFailedBarePods to evict bare pods in failed phase
if len(ownerRefList) == 0 && pod.Status.Phase != v1.PodFailed {
return fmt.Errorf("pod does not have any ownerRefs and is not in failed phase")
}
return nil
})
} else {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
ownerRefList := podutil.OwnerRef(pod)
// Moved from IsEvictable function for backward compatibility
if len(ownerRefList) == 0 {
return fmt.Errorf("pod does not have any ownerRefs")
}
return nil
})
}
if !pe.evictSystemCriticalPods {
ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
// Moved from IsEvictable function to allow for disabling
@@ -323,6 +278,10 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs = append(checkErrs, fmt.Errorf("pod is a DaemonSet pod"))
}
if len(ownerRefList) == 0 {
checkErrs = append(checkErrs, fmt.Errorf("pod does not have any ownerrefs"))
}
if utils.IsMirrorPod(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is a mirror pod"))
}
@@ -331,10 +290,6 @@ func (ev *evictable) IsEvictable(pod *v1.Pod) bool {
checkErrs = append(checkErrs, fmt.Errorf("pod is a static pod"))
}
if utils.IsPodTerminating(pod) {
checkErrs = append(checkErrs, fmt.Errorf("pod is terminating"))
}
for _, c := range ev.constraints {
if err := c(pod); err != nil {
checkErrs = append(checkErrs, err)

View File

@@ -62,7 +62,7 @@ func TestEvictPod(t *testing.T) {
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil
})
got := evictPod(ctx, fakeClient, test.pod, "v1")
got := evictPod(ctx, fakeClient, test.pod, "v1", false)
if got != test.want {
t.Errorf("Test error for Desc: %s. Expected %v pod eviction to be %v, got %v", test.description, test.pod.Name, test.want, got)
}
@@ -83,7 +83,6 @@ func TestIsEvictable(t *testing.T) {
pod *v1.Pod
nodes []*v1.Node
runBefore func(*v1.Pod, []*v1.Node)
evictFailedBarePods bool
evictLocalStoragePods bool
evictSystemCriticalPods bool
priorityThreshold *int32
@@ -92,27 +91,7 @@ func TestIsEvictable(t *testing.T) {
}
testCases := []testCase{
{ // Failed pod eviction with no ownerRefs.
pod: test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.Status.Phase = v1.PodFailed
},
evictFailedBarePods: false,
result: false,
}, { // Normal pod eviction with no ownerRefs and evictFailedBarePods enabled
pod: test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
},
evictFailedBarePods: true,
result: false,
}, { // Failed pod eviction with no ownerRefs
pod: test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.Status.Phase = v1.PodFailed
},
evictFailedBarePods: true,
result: true,
}, { // Normal pod eviction with normal ownerRefs
{ // Normal pod eviction with normal ownerRefs
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
@@ -438,7 +417,6 @@ func TestIsEvictable(t *testing.T) {
podEvictor := &PodEvictor{
evictLocalStoragePods: test.evictLocalStoragePods,
evictSystemCriticalPods: test.evictSystemCriticalPods,
evictFailedBarePods: test.evictFailedBarePods,
nodes: nodes,
}

View File

@@ -17,174 +17,139 @@ limitations under the License.
package pod
import (
"context"
"sort"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/fields"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/utils"
)
const (
nodeNameKeyIndex = "spec.nodeName"
)
// FilterFunc is a filter for a pod.
type FilterFunc func(*v1.Pod) bool
// GetPodsAssignedToNodeFunc is a function which accepts a node name and a pod filter function
// as input and returns the pods that are assigned to the node.
type GetPodsAssignedToNodeFunc func(string, FilterFunc) ([]*v1.Pod, error)
// WrapFilterFuncs wraps a set of FilterFunc in one.
func WrapFilterFuncs(filters ...FilterFunc) FilterFunc {
return func(pod *v1.Pod) bool {
for _, filter := range filters {
if filter != nil && !filter(pod) {
return false
}
}
return true
}
}
type Options struct {
filter FilterFunc
includedNamespaces sets.String
excludedNamespaces sets.String
filter func(pod *v1.Pod) bool
includedNamespaces []string
excludedNamespaces []string
labelSelector *metav1.LabelSelector
}
// NewOptions returns an empty Options.
func NewOptions() *Options {
return &Options{}
}
// WithFilter sets a pod filter.
// The filter function should return true if the pod should be returned from ListPodsOnANode
func (o *Options) WithFilter(filter FilterFunc) *Options {
o.filter = filter
return o
func WithFilter(filter func(pod *v1.Pod) bool) func(opts *Options) {
return func(opts *Options) {
opts.filter = filter
}
}
// WithNamespaces sets included namespaces
func (o *Options) WithNamespaces(namespaces sets.String) *Options {
o.includedNamespaces = namespaces
return o
func WithNamespaces(namespaces []string) func(opts *Options) {
return func(opts *Options) {
opts.includedNamespaces = namespaces
}
}
// WithoutNamespaces sets excluded namespaces
func (o *Options) WithoutNamespaces(namespaces sets.String) *Options {
o.excludedNamespaces = namespaces
return o
func WithoutNamespaces(namespaces []string) func(opts *Options) {
return func(opts *Options) {
opts.excludedNamespaces = namespaces
}
}
// WithLabelSelector sets a pod label selector
func (o *Options) WithLabelSelector(labelSelector *metav1.LabelSelector) *Options {
o.labelSelector = labelSelector
return o
func WithLabelSelector(labelSelector *metav1.LabelSelector) func(opts *Options) {
return func(opts *Options) {
opts.labelSelector = labelSelector
}
}
// BuildFilterFunc builds a final FilterFunc based on Options.
func (o *Options) BuildFilterFunc() (FilterFunc, error) {
var s labels.Selector
var err error
if o.labelSelector != nil {
s, err = metav1.LabelSelectorAsSelector(o.labelSelector)
if err != nil {
return nil, err
}
}
return func(pod *v1.Pod) bool {
if o.filter != nil && !o.filter(pod) {
return false
}
if len(o.includedNamespaces) > 0 && !o.includedNamespaces.Has(pod.Namespace) {
return false
}
if len(o.excludedNamespaces) > 0 && o.excludedNamespaces.Has(pod.Namespace) {
return false
}
if s != nil && !s.Matches(labels.Set(pod.GetLabels())) {
return false
}
return true
}, nil
}
// BuildGetPodsAssignedToNodeFunc establishes an indexer to map the pods and their assigned nodes.
// It returns a function to help us get all the pods that are assigned to a node based on the indexer.
func BuildGetPodsAssignedToNodeFunc(podInformer coreinformers.PodInformer) (GetPodsAssignedToNodeFunc, error) {
// Establish an indexer to map the pods and their assigned nodes.
err := podInformer.Informer().AddIndexers(cache.Indexers{
nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
pod, ok := obj.(*v1.Pod)
if !ok {
return []string{}, nil
}
if len(pod.Spec.NodeName) == 0 {
return []string{}, nil
}
return []string{pod.Spec.NodeName}, nil
},
})
if err != nil {
return nil, err
// ListPodsOnANode lists all of the pods on a node
// It also accepts an optional "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity).
func ListPodsOnANode(
ctx context.Context,
client clientset.Interface,
node *v1.Node,
opts ...func(opts *Options),
) ([]*v1.Pod, error) {
options := &Options{}
for _, opt := range opts {
opt(options)
}
// The indexer helps us get all the pods that assigned to a node.
podIndexer := podInformer.Informer().GetIndexer()
getPodsAssignedToNode := func(nodeName string, filter FilterFunc) ([]*v1.Pod, error) {
objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
pods := make([]*v1.Pod, 0)
fieldSelectorString := "spec.nodeName=" + node.Name + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)
labelSelectorString := ""
if options.labelSelector != nil {
selector, err := metav1.LabelSelectorAsSelector(options.labelSelector)
if err != nil {
return nil, err
return []*v1.Pod{}, err
}
pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
labelSelectorString = selector.String()
}
if len(options.includedNamespaces) > 0 {
fieldSelector, err := fields.ParseSelector(fieldSelectorString)
if err != nil {
return []*v1.Pod{}, err
}
for _, namespace := range options.includedNamespaces {
podList, err := client.CoreV1().Pods(namespace).List(ctx,
metav1.ListOptions{
FieldSelector: fieldSelector.String(),
LabelSelector: labelSelectorString,
})
if err != nil {
return []*v1.Pod{}, err
}
if filter(pod) {
pods = append(pods, pod)
for i := range podList.Items {
if options.filter != nil && !options.filter(&podList.Items[i]) {
continue
}
pods = append(pods, &podList.Items[i])
}
}
return pods, nil
}
return getPodsAssignedToNode, nil
}
// ListPodsOnANode lists all pods on a node.
// It also accepts a "filter" function which can be used to further limit the pods that are returned.
// (Usually this is podEvictor.Evictable().IsEvictable, in order to only list the evictable pods on a node, but can
// be used by strategies to extend it if there are further restrictions, such as with NodeAffinity).
func ListPodsOnANode(
nodeName string,
getPodsAssignedToNode GetPodsAssignedToNodeFunc,
filter FilterFunc,
) ([]*v1.Pod, error) {
// Succeeded and failed pods are not considered because they don't occupy any resource.
f := func(pod *v1.Pod) bool {
return pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed
if len(options.excludedNamespaces) > 0 {
for _, namespace := range options.excludedNamespaces {
fieldSelectorString += ",metadata.namespace!=" + namespace
}
}
return ListAllPodsOnANode(nodeName, getPodsAssignedToNode, WrapFilterFuncs(f, filter))
}
// ListAllPodsOnANode lists all the pods on a node no matter what the phase of the pod is.
func ListAllPodsOnANode(
nodeName string,
getPodsAssignedToNode GetPodsAssignedToNodeFunc,
filter FilterFunc,
) ([]*v1.Pod, error) {
pods, err := getPodsAssignedToNode(nodeName, filter)
fieldSelector, err := fields.ParseSelector(fieldSelectorString)
if err != nil {
return []*v1.Pod{}, err
}
// INFO(jchaloup): field selectors do not work properly with listers
// Once the descheduler switches to pod listers (through informers),
// We need to flip to client-side filtering.
podList, err := client.CoreV1().Pods(v1.NamespaceAll).List(ctx,
metav1.ListOptions{
FieldSelector: fieldSelector.String(),
LabelSelector: labelSelectorString,
})
if err != nil {
return []*v1.Pod{}, err
}
for i := range podList.Items {
// fake client does not support field selectors
// so let's filter based on the node name as well (quite cheap)
if podList.Items[i].Spec.NodeName != node.Name {
continue
}
if options.filter != nil && !options.filter(&podList.Items[i]) {
continue
}
pods = append(pods, &podList.Items[i])
}
return pods, nil
}
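
Taken together, the newer side of this file replaces per-strategy API list calls with a spec.nodeName index plus composable filters. A minimal sketch of the intended wiring, assuming this repository's test helpers; the namespace exclusion stands in for a real strategy filter such as IsEvictable:

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
	"sigs.k8s.io/descheduler/test"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fakeClient := fake.NewSimpleClientset(
		test.BuildTestPod("p1", 100, 0, "n1", nil),
		test.BuildTestPod("p2", 100, 0, "n1", func(pod *v1.Pod) { pod.Namespace = "kube-system" }),
	)

	// Index pods by spec.nodeName once, then reuse the lookup function everywhere.
	sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
	getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(sharedInformerFactory.Core().V1().Pods())
	if err != nil {
		panic(err)
	}
	sharedInformerFactory.Start(ctx.Done())
	sharedInformerFactory.WaitForCacheSync(ctx.Done())

	// Compose namespace exclusion (and, in a real strategy, IsEvictable) into one FilterFunc.
	podFilter, err := podutil.NewOptions().
		WithoutNamespaces(sets.NewString("kube-system")).
		BuildFilterFunc()
	if err != nil {
		panic(err)
	}

	pods, _ := podutil.ListPodsOnANode("n1", getPodsAssignedToNode, podFilter)
	fmt.Println(len(pods)) // 1: only p1 remains; p2 is excluded by namespace
}
```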

View File

@@ -18,15 +18,16 @@ package pod
import (
"context"
"fmt"
"reflect"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/test"
)
@@ -38,17 +39,19 @@ var (
func TestListPodsOnANode(t *testing.T) {
testCases := []struct {
name string
pods []*v1.Pod
pods map[string][]v1.Pod
node *v1.Node
labelSelector *metav1.LabelSelector
expectedPodCount int
}{
{
name: "test listing pods on a node",
pods: []*v1.Pod{
test.BuildTestPod("pod1", 100, 0, "n1", nil),
test.BuildTestPod("pod2", 100, 0, "n1", nil),
test.BuildTestPod("pod3", 100, 0, "n2", nil),
pods: map[string][]v1.Pod{
"n1": {
*test.BuildTestPod("pod1", 100, 0, "n1", nil),
*test.BuildTestPod("pod2", 100, 0, "n1", nil),
},
"n2": {*test.BuildTestPod("pod3", 100, 0, "n2", nil)},
},
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
labelSelector: nil,
@@ -56,15 +59,17 @@ func TestListPodsOnANode(t *testing.T) {
},
{
name: "test listing pods with label selector",
pods: []*v1.Pod{
test.BuildTestPod("pod1", 100, 0, "n1", nil),
test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar"}
}),
test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar1"}
}),
test.BuildTestPod("pod4", 100, 0, "n2", nil),
pods: map[string][]v1.Pod{
"n1": {
*test.BuildTestPod("pod1", 100, 0, "n1", nil),
*test.BuildTestPod("pod2", 100, 0, "n1", func(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar"}
}),
*test.BuildTestPod("pod3", 100, 0, "n1", func(pod *v1.Pod) {
pod.Labels = map[string]string{"foo": "bar1"}
}),
},
"n2": {*test.BuildTestPod("pod4", 100, 0, "n2", nil)},
},
node: test.BuildTestNode("n1", 2000, 3000, 10, nil),
labelSelector: &metav1.LabelSelector{
@@ -80,38 +85,21 @@ func TestListPodsOnANode(t *testing.T) {
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
objs = append(objs, testCase.node)
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
filter, err := NewOptions().WithLabelSelector(testCase.labelSelector).BuildFilterFunc()
if err != nil {
t.Errorf("Build filter function error: %v", err)
}
pods, _ := ListPodsOnANode(testCase.node.Name, getPodsAssignedToNode, filter)
if len(pods) != testCase.expectedPodCount {
t.Errorf("Expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, "n1") {
return true, &v1.PodList{Items: testCase.pods["n1"]}, nil
} else if strings.Contains(fieldString, "n2") {
return true, &v1.PodList{Items: testCase.pods["n2"]}, nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
pods, _ := ListPodsOnANode(context.TODO(), fakeClient, testCase.node, WithLabelSelector(testCase.labelSelector))
if len(pods) != testCase.expectedPodCount {
t.Errorf("expected %v pods on node %v, got %+v", testCase.expectedPodCount, testCase.node.Name, len(pods))
}
}
}

View File

@@ -66,7 +66,6 @@ func RemoveDuplicatePods(
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
if err := validateRemoveDuplicatePodsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemoveDuplicatePods parameters")
@@ -78,10 +77,10 @@ func RemoveDuplicatePods(
return
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces []string
if strategy.Params != nil && strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
nodeFit := false
@@ -96,19 +95,15 @@ func RemoveDuplicatePods(
nodeCount := 0
nodeMap := make(map[string]*v1.Node)
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
pods, err := podutil.ListPodsOnANode(ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
)
if err != nil {
klog.ErrorS(err, "Error listing evictable pods on node", "node", klog.KObj(node))
continue

View File

@@ -25,12 +25,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
@@ -45,6 +43,7 @@ func buildTestPodWithImage(podName, node, image string) *v1.Pod {
}
func TestFindDuplicatePods(t *testing.T) {
ctx := context.Background()
// first setup pods
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
@@ -174,91 +173,104 @@ func TestFindDuplicatePods(t *testing.T) {
testCases := []struct {
description string
pods []*v1.Pod
maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
expectedEvictedPodCount uint
expectedEvictedPodCount int
strategy api.DeschedulerStrategy
}{
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{},
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet, but ReplicaSet kind is excluded. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{RemoveDuplicates: &api.RemoveDuplicates{ExcludeOwnerKinds: []string{"ReplicaSet"}}}},
},
{
description: "Three Pods in the `test` Namespace, bound to same ReplicaSet. 1 should be evicted.",
pods: []*v1.Pod{p8, p9, p10},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p8, *p9, *p10},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 1,
strategy: api.DeschedulerStrategy{},
},
{
description: "Three Pods in the `dev` Namespace, three Pods in the `test` Namespace. Bound to ReplicaSet with same name. 4 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p8, p9, p10},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p8, *p9, *p10},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 2,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods are: part of DaemonSet, with local storage, mirror pod annotation, critical pod annotation - none should be evicted.",
pods: []*v1.Pod{p4, p5, p6, p7},
maxPodsToEvictPerNode: 2,
pods: []v1.Pod{*p4, *p5, *p6, *p7},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
description: "Test all Pods: 4 should be evicted.",
pods: []*v1.Pod{p1, p2, p3, p4, p5, p6, p7, p8, p9, p10},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3, *p4, *p5, *p6, *p7, *p8, *p9, *p10},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 2,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods with the same owner but different images should not be evicted",
pods: []*v1.Pod{p11, p12},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11, *p12},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
description: "Pods with multiple containers should not match themselves",
pods: []*v1.Pod{p13},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p13},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
			description:             "Pods with matching ownerrefs but not all matching images should not trigger an eviction",
pods: []*v1.Pod{p11, p13},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11, *p13},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{},
},
{
description: "Three pods in the `dev` Namespace, bound to same ReplicaSet. Only node available has a taint, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet, all with a nodeSelector. Only node available has an incorrect node label, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p15, p16, p17},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p15, *p16, *p17},
nodes: []*v1.Node{node1, node4},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
},
{
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available is not schedulable, and nodeFit set to true. 0 should be evicted.",
pods: []*v1.Pod{p1, p2, p3},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node5},
expectedEvictedPodCount: 0,
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
@@ -267,44 +279,22 @@ func TestFindDuplicatePods(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range testCase.nodes {
objs = append(objs, node)
}
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: testCase.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
nil,
nil,
testCase.maxPodsToEvictPerNode,
testCase.nodes,
false,
false,
false,
false,
false,
)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)
@@ -315,6 +305,8 @@ func TestFindDuplicatePods(t *testing.T) {
}
func TestRemoveDuplicatesUniformly(t *testing.T) {
ctx := context.Background()
setRSOwnerRef2 := func(pod *v1.Pod) {
pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{
{Kind: "ReplicaSet", APIVersion: "v1", Name: "replicaset-2"},
@@ -440,24 +432,25 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
testCases := []struct {
description string
pods []*v1.Pod
maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
expectedEvictedPodCount uint
expectedEvictedPodCount int
strategy api.DeschedulerStrategy
}{
{
description: "Evict pods uniformly",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
@@ -469,17 +462,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly with one node left out",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1) -> (4,4,1) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p3", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p4", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p5", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p6", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p7", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p8", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p9", 100, 0, "n3", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
@@ -490,17 +483,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly with two replica sets",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
*test.BuildTestPod("p11", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p12", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p13", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p14", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p15", 100, 0, "n1", setTwoRSOwnerRef),
*test.BuildTestPod("p16", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p17", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p18", 100, 0, "n2", setTwoRSOwnerRef),
*test.BuildTestPod("p19", 100, 0, "n3", setTwoRSOwnerRef),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
@@ -512,27 +505,27 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly with two owner references",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
*test.BuildTestPod("p11", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p12", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p13", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p14", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p15", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p16", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p17", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p18", 100, 0, "n2", test.SetRSOwnerRef),
*test.BuildTestPod("p19", 100, 0, "n3", test.SetRSOwnerRef),
// (1,3,5) -> (3,3,3) -> 2 evictions
test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p21", 100, 0, "n1", setRSOwnerRef2),
*test.BuildTestPod("p22", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p23", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p24", 100, 0, "n2", setRSOwnerRef2),
*test.BuildTestPod("p25", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p26", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p27", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p28", 100, 0, "n3", setRSOwnerRef2),
*test.BuildTestPod("p29", 100, 0, "n3", setRSOwnerRef2),
},
expectedEvictedPodCount: 4,
nodes: []*v1.Node{
@@ -544,10 +537,10 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods with number of pods less than nodes",
pods: []*v1.Pod{
pods: []v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p2", 100, 0, "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
@@ -559,14 +552,14 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods with number of pods less than nodes, but ignore different pods with the same ownerref",
pods: []*v1.Pod{
pods: []v1.Pod{
// (1, 0, 0) for "bar","baz" images -> no eviction, even with a matching ownerKey
// (2, 0, 0) for "foo" image -> (1,1,0) - 1 eviction
// In this case the only "real" duplicates are p1 and p4, so one of those should be evicted
buildTestPodWithImage("p1", "n1", "foo"),
buildTestPodWithImage("p2", "n1", "bar"),
buildTestPodWithImage("p3", "n1", "baz"),
buildTestPodWithImage("p4", "n1", "foo"),
*buildTestPodWithImage("p1", "n1", "foo"),
*buildTestPodWithImage("p2", "n1", "bar"),
*buildTestPodWithImage("p3", "n1", "baz"),
*buildTestPodWithImage("p4", "n1", "foo"),
},
expectedEvictedPodCount: 1,
nodes: []*v1.Node{
@@ -578,9 +571,9 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods with a single pod with three nodes",
pods: []*v1.Pod{
pods: []v1.Pod{
// (2,0,0) -> (1,1,0) -> 1 eviction
test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
*test.BuildTestPod("p1", 100, 0, "n1", test.SetRSOwnerRef),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
@@ -592,17 +585,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly respecting taints",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
*test.BuildTestPod("p1", 100, 0, "worker1", setTolerationsK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setTolerationsK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setTolerationsK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setTolerationsK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setTolerationsK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setTolerationsK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setTolerationsK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setTolerationsK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setTolerationsK1),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
@@ -617,17 +610,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly respecting RequiredDuringSchedulingIgnoredDuringExecution node affinity",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p1", 100, 0, "worker1", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setNotMasterNodeSelectorK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setNotMasterNodeSelectorK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setNotMasterNodeSelectorK1),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
@@ -642,17 +635,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly respecting node selector",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
},
expectedEvictedPodCount: 2,
nodes: []*v1.Node{
@@ -667,17 +660,17 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
},
{
description: "Evict pods uniformly respecting node selector with zero target nodes",
pods: []*v1.Pod{
pods: []v1.Pod{
// (5,3,1,0,0,0) -> (3,3,3,0,0,0) -> 2 evictions
test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
*test.BuildTestPod("p1", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p2", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p3", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p4", 100, 0, "worker1", setWorkerLabelSelectorK2),
*test.BuildTestPod("p5", 100, 0, "worker1", setWorkerLabelSelectorK1),
*test.BuildTestPod("p6", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p7", 100, 0, "worker2", setWorkerLabelSelectorK1),
*test.BuildTestPod("p8", 100, 0, "worker2", setWorkerLabelSelectorK2),
*test.BuildTestPod("p9", 100, 0, "worker3", setWorkerLabelSelectorK1),
},
expectedEvictedPodCount: 0,
nodes: []*v1.Node{
@@ -694,44 +687,22 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range testCase.nodes {
objs = append(objs, node)
}
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: testCase.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
testCase.maxPodsToEvictPerNode,
testCase.nodes,
false,
false,
false,
false,
false,
)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
RemoveDuplicatePods(ctx, fakeClient, testCase.strategy, testCase.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != testCase.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", testCase.description, testCase.expectedEvictedPodCount, podsEvicted)

View File

@@ -1,179 +0,0 @@
package strategies
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
)
// validatedFailedPodsStrategyParams contains validated strategy parameters
type validatedFailedPodsStrategyParams struct {
validation.ValidatedStrategyParams
includingInitContainers bool
reasons sets.String
excludeOwnerKinds sets.String
minPodLifetimeSeconds *uint
}
// RemoveFailedPods removes Pods that are in failed status phase.
func RemoveFailedPods(
ctx context.Context,
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validateAndParseRemoveFailedPodsParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Invalid RemoveFailedPods parameters")
return
}
evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.LabelSelector),
)
var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
labelSelector = strategy.Params.LabelSelector
}
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(strategyParams.IncludedNamespaces).
WithoutNamespaces(strategyParams.ExcludedNamespaces).
WithLabelSelector(labelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
// Only list failed pods
phaseFilter := func(pod *v1.Pod) bool { return pod.Status.Phase == v1.PodFailed }
podFilter = podutil.WrapFilterFuncs(phaseFilter, podFilter)
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
if err != nil {
klog.ErrorS(err, "Error listing a nodes failed pods", "node", klog.KObj(node))
continue
}
for i, pod := range pods {
if err = validateFailedPodShouldEvict(pod, *strategyParams); err != nil {
klog.V(4).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(pod))
continue
}
if _, err = podEvictor.EvictPod(ctx, pods[i], node, "FailedPod"); err != nil {
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
break
}
}
}
}
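For reference, the FailedPods parameters consumed by validateAndParseRemoveFailedPodsParams below can be assembled like this. This is a hedged sketch only, using illustrative values rather than project defaults; the field names come from the api.FailedPods and api.StrategyParameters types used in this file:

```go
package main

import (
	"fmt"

	"sigs.k8s.io/descheduler/pkg/api"
)

func main() {
	// Illustrative values only; none of these are project defaults.
	minLifetime := uint(3600)
	strategy := api.DeschedulerStrategy{
		Enabled: true,
		Params: &api.StrategyParameters{
			FailedPods: &api.FailedPods{
				Reasons:                 []string{"NodeAffinity", "CreateContainerConfigError"},
				IncludingInitContainers: true,
				ExcludeOwnerKinds:       []string{"Job"},
				MinPodLifetimeSeconds:   &minLifetime,
			},
			NodeFit: true,
		},
	}
	fmt.Printf("%+v\n", strategy)
}
```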
func validateAndParseRemoveFailedPodsParams(
ctx context.Context,
client clientset.Interface,
params *api.StrategyParameters,
) (*validatedFailedPodsStrategyParams, error) {
if params == nil {
return &validatedFailedPodsStrategyParams{
ValidatedStrategyParams: validation.DefaultValidatedStrategyParams(),
}, nil
}
strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, params)
if err != nil {
return nil, err
}
var reasons, excludeOwnerKinds sets.String
var includingInitContainers bool
var minPodLifetimeSeconds *uint
if params.FailedPods != nil {
reasons = sets.NewString(params.FailedPods.Reasons...)
includingInitContainers = params.FailedPods.IncludingInitContainers
excludeOwnerKinds = sets.NewString(params.FailedPods.ExcludeOwnerKinds...)
minPodLifetimeSeconds = params.FailedPods.MinPodLifetimeSeconds
}
return &validatedFailedPodsStrategyParams{
ValidatedStrategyParams: *strategyParams,
includingInitContainers: includingInitContainers,
reasons: reasons,
excludeOwnerKinds: excludeOwnerKinds,
minPodLifetimeSeconds: minPodLifetimeSeconds,
}, nil
}
// validateFailedPodShouldEvict looks at strategy params settings to see if the Pod
// should be evicted given the params in the FailedPods policy.
func validateFailedPodShouldEvict(pod *v1.Pod, strategyParams validatedFailedPodsStrategyParams) error {
var errs []error
if strategyParams.minPodLifetimeSeconds != nil {
podAgeSeconds := uint(metav1.Now().Sub(pod.GetCreationTimestamp().Local()).Seconds())
if podAgeSeconds < *strategyParams.minPodLifetimeSeconds {
errs = append(errs, fmt.Errorf("pod does not exceed the min age seconds of %d", *strategyParams.minPodLifetimeSeconds))
}
}
if len(strategyParams.excludeOwnerKinds) > 0 {
ownerRefList := podutil.OwnerRef(pod)
for _, owner := range ownerRefList {
if strategyParams.excludeOwnerKinds.Has(owner.Kind) {
errs = append(errs, fmt.Errorf("pod's owner kind of %s is excluded", owner.Kind))
}
}
}
if len(strategyParams.reasons) > 0 {
reasons := getFailedContainerStatusReasons(pod.Status.ContainerStatuses)
if pod.Status.Phase == v1.PodFailed && pod.Status.Reason != "" {
reasons = append(reasons, pod.Status.Reason)
}
if strategyParams.includingInitContainers {
reasons = append(reasons, getFailedContainerStatusReasons(pod.Status.InitContainerStatuses)...)
}
if !strategyParams.reasons.HasAny(reasons...) {
errs = append(errs, fmt.Errorf("pod does not match any of the reasons"))
}
}
return utilerrors.NewAggregate(errs)
}
func getFailedContainerStatusReasons(containerStatuses []v1.ContainerStatus) []string {
reasons := make([]string, 0)
for _, containerStatus := range containerStatuses {
if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason != "" {
reasons = append(reasons, containerStatus.State.Waiting.Reason)
}
if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.Reason != "" {
reasons = append(reasons, containerStatus.State.Terminated.Reason)
}
}
return reasons
}
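Taken together, validateFailedPodShouldEvict and getFailedContainerStatusReasons reduce the Reasons parameter to a set membership check over the collected container reasons. A small self-contained sketch of that check; the container state and the allowed reason below are invented for illustration:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// One container terminated with a reason, mirroring the shape that
	// getFailedContainerStatusReasons walks over.
	statuses := []v1.ContainerStatus{
		{State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"}}},
	}

	var reasons []string
	for _, s := range statuses {
		if s.State.Waiting != nil && s.State.Waiting.Reason != "" {
			reasons = append(reasons, s.State.Waiting.Reason)
		}
		if s.State.Terminated != nil && s.State.Terminated.Reason != "" {
			reasons = append(reasons, s.State.Terminated.Reason)
		}
	}

	// With Reasons configured, the pod stays an eviction candidate only if
	// at least one collected reason is in the allowed set.
	allowed := sets.NewString("NodeAffinity")
	fmt.Println(allowed.HasAny(reasons...)) // true
}
```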

View File

@@ -1,343 +0,0 @@
package strategies
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
var (
OneHourInSeconds uint = 3600
)
func TestRemoveFailedPods(t *testing.T) {
createStrategy := func(enabled, includingInitContainers bool, reasons, excludeKinds []string, minAgeSeconds *uint, nodeFit bool) api.DeschedulerStrategy {
return api.DeschedulerStrategy{
Enabled: enabled,
Params: &api.StrategyParameters{
FailedPods: &api.FailedPods{
Reasons: reasons,
IncludingInitContainers: includingInitContainers,
ExcludeOwnerKinds: excludeKinds,
MinPodLifetimeSeconds: minAgeSeconds,
},
NodeFit: nodeFit,
},
}
}
tests := []struct {
description string
nodes []*v1.Node
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
pods []*v1.Pod
}{
{
description: "default empty strategy, 0 failures, 0 evictions",
strategy: api.DeschedulerStrategy{},
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
},
{
description: "0 failures, 0 evictions",
strategy: createStrategy(true, false, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{}, // no pods come back with field selector phase=Failed
},
{
description: "1 container terminated with reason NodeAffinity, 1 eviction",
strategy: createStrategy(true, false, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "1 init container terminated with reason NodeAffinity, 1 eviction",
strategy: createStrategy(true, true, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), nil),
},
},
{
description: "1 init container waiting with reason CreateContainerConfigError, 1 eviction",
strategy: createStrategy(true, true, nil, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
}, nil), nil),
},
},
{
description: "2 init container waiting with reason CreateContainerConfigError, 2 nodes, 2 evictions",
strategy: createStrategy(true, true, nil, nil, nil, false),
nodes: []*v1.Node{
test.BuildTestNode("node1", 2000, 3000, 10, nil),
test.BuildTestNode("node2", 2000, 3000, 10, nil),
},
expectedEvictedPodCount: 2,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}, nil), nil),
buildTestPod("p2", "node2", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}, nil), nil),
},
},
{
description: "include reason=CreateContainerConfigError, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}), nil),
},
},
{
description: "include reason=CreateContainerConfigError+NodeAffinity, 1 container terminated with reason CreateContainerConfigError, 1 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError", "NodeAffinity"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
}), nil),
},
},
{
description: "include reason=CreateContainerConfigError, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "include init container=false, 1 init container waiting with reason CreateContainerConfigError, 0 eviction",
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{Reason: "CreateContainerConfigError"},
}, nil), nil),
},
},
{
description: "lifetime 1 hour, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, nil, nil, &OneHourInSeconds, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "nodeFit=true, 1 unschedulable node, 1 container terminated with reason NodeAffinity, 0 eviction",
strategy: createStrategy(true, false, nil, nil, nil, true),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Unschedulable = true
})},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}), nil),
},
},
{
description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction",
strategy: createStrategy(true, true, nil, []string{"ReplicaSet"}, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), nil),
},
},
{
description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 eviction",
strategy: createStrategy(true, true, nil, []string{"DaemonSet"}, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 1,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), nil),
},
},
{
description: "excluded owner kind=DaemonSet, 1 init container terminated with owner kind=ReplicaSet, 1 pod in termination; nothing should be moved",
strategy: createStrategy(true, true, nil, []string{"DaemonSet"}, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("", "", &v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"},
}, nil), &metav1.Time{}),
},
},
{
description: "1 container terminated with reason ShutDown, 0 evictions",
strategy: createStrategy(true, false, nil, nil, nil, true),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 0,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("Shutdown", v1.PodFailed, nil, nil), nil),
},
},
{
description: "include reason=Shutdown, 2 containers terminated with reason ShutDown, 2 evictions",
strategy: createStrategy(true, false, []string{"Shutdown"}, nil, nil, false),
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, nil)},
expectedEvictedPodCount: 2,
pods: []*v1.Pod{
buildTestPod("p1", "node1", newPodStatus("Shutdown", v1.PodFailed, nil, nil), nil),
buildTestPod("p2", "node1", newPodStatus("Shutdown", v1.PodFailed, nil, nil), nil),
},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,
false,
false,
false,
)
RemoveFailedPods(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
})
}
}
func TestValidRemoveFailedPodsParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{name: "validate nil params", params: nil},
{name: "validate empty params", params: &api.StrategyParameters{}},
{name: "validate reasons params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
Reasons: []string{"CreateContainerConfigError"},
}}},
{name: "validate includingInitContainers params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
IncludingInitContainers: true,
}}},
{name: "validate excludeOwnerKinds params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
ExcludeOwnerKinds: []string{"Job"},
}}},
{name: "validate excludeOwnerKinds params", params: &api.StrategyParameters{FailedPods: &api.FailedPods{
MinPodLifetimeSeconds: &OneHourInSeconds,
}}},
}
for _, tc := range testCases {
tc := tc // capture range variable; these subtests call t.Parallel()
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := validateAndParseRemoveFailedPodsParams(ctx, fakeClient, tc.params)
if err != nil {
t.Errorf("strategy params should be valid but got err: %v", err.Error())
}
if params == nil {
t.Errorf("strategy params should return a ValidatedFailedPodsStrategyParams but got nil")
}
})
}
}
func newPodStatus(reason string, phase v1.PodPhase, initContainerState, containerState *v1.ContainerState) v1.PodStatus {
ps := v1.PodStatus{
Reason: reason,
Phase: phase,
}
if initContainerState != nil {
ps.InitContainerStatuses = []v1.ContainerStatus{{State: *initContainerState}}
ps.Phase = v1.PodFailed
}
if containerState != nil {
ps.ContainerStatuses = []v1.ContainerStatus{{State: *containerState}}
ps.Phase = v1.PodFailed
}
return ps
}
func buildTestPod(podName, nodeName string, podStatus v1.PodStatus, deletionTimestamp *metav1.Time) *v1.Pod {
pod := test.BuildTestPod(podName, 1, 1, nodeName, func(p *v1.Pod) {
p.Status = podStatus
})
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
pod.ObjectMeta.SetCreationTimestamp(metav1.Now())
pod.DeletionTimestamp = deletionTimestamp
return pod
}
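The newPodStatus and buildTestPod helpers above boil down to a ReplicaSet-owned pod in the Failed phase with one terminated container. A standalone approximation of such a fixture using only the upstream API types; the pod name, node name, and owner reference are illustrative:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Roughly what buildTestPod("p1", "node1", ...) above produces: a
	// ReplicaSet-owned pod in the Failed phase with one terminated container.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "p1",
			CreationTimestamp: metav1.Now(),
			OwnerReferences: []metav1.OwnerReference{
				{APIVersion: "apps/v1", Kind: "ReplicaSet", Name: "rs-example"},
			},
		},
		Spec: v1.PodSpec{NodeName: "node1"},
		Status: v1.PodStatus{
			Phase: v1.PodFailed,
			ContainerStatuses: []v1.ContainerStatus{
				{State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "NodeAffinity"}}},
			},
		},
	}
	fmt.Println(pod.Name, string(pod.Status.Phase))
}
```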

View File

@@ -21,7 +21,6 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@@ -48,7 +47,7 @@ func validatePodsViolatingNodeAffinityParams(params *api.StrategyParameters) err
}
// RemovePodsViolatingNodeAffinity evicts pods on nodes which violate node affinity
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validatePodsViolatingNodeAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeAffinity parameters")
return
@@ -59,10 +58,10 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
return
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces []string
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
nodeFit := false
@@ -72,16 +71,6 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, nodeAffinity := range strategy.Params.NodeAffinityType {
klog.V(2).InfoS("Executing for nodeAffinityType", "nodeAffinity", nodeAffinity)
@@ -91,13 +80,17 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(
node.Name,
getPodsAssignedToNode,
podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
ctx,
client,
node,
podutil.WithFilter(func(pod *v1.Pod) bool {
return evictable.IsEvictable(pod) &&
!nodeutil.PodFitsCurrentNode(pod, node) &&
nodeutil.PodFitsAnyNode(pod, nodes)
}),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(strategy.Params.LabelSelector),
)
if err != nil {
klog.ErrorS(err, "Failed to get pods", "node", klog.KObj(node))

View File

@@ -22,18 +22,16 @@ import (
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
ctx := context.Background()
requiredDuringSchedulingIgnoredDuringExecutionStrategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
@@ -61,10 +59,10 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
nodeWithoutLabels := test.BuildTestNode("nodeWithoutLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels := test.BuildTestNode("unschedulableNodeWithLabels", 2000, 3000, 10, nil)
unschedulableNodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
nodeWithLabels.Labels[nodeLabelKey] = nodeLabelValue
unschedulableNodeWithLabels.Spec.Unschedulable = true
addPodsToNode := func(node *v1.Node, deletionTimestamp *metav1.Time) []*v1.Pod {
addPodsToNode := func(node *v1.Node) []v1.Pod {
podWithNodeAffinity := test.BuildTestPod("podWithNodeAffinity", 100, 0, node.Name, nil)
podWithNodeAffinity.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
@@ -93,25 +91,20 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
pod1.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod2.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
pod1.DeletionTimestamp = deletionTimestamp
pod2.DeletionTimestamp = deletionTimestamp
return []*v1.Pod{
podWithNodeAffinity,
pod1,
pod2,
return []v1.Pod{
*podWithNodeAffinity,
*pod1,
*pod2,
}
}
var uint1 uint = 1
tests := []struct {
description string
nodes []*v1.Node
pods []*v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
description string
nodes []*v1.Node
pods []v1.Pod
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
maxPodsToEvictPerNode int
}{
{
description: "Invalid strategy type, should not evict any pods",
@@ -124,116 +117,74 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
},
},
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is correctly scheduled on node, no eviction expected",
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
expectedEvictedPodCount: 0,
pods: addPodsToNode(nodeWithLabels, nil),
pods: addPodsToNode(nodeWithLabels),
nodes: []*v1.Node{nodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, should be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxPodsToEvictPerNode set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxPodsToEvictPerNode: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, should not be evicted",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
},
{
description: "Pod is scheduled on node without matching labels, another schedulable node available, maxNoOfPodsToEvictPerNamespace set to 1, no pod evicted since pod terminting",
expectedEvictedPodCount: 1,
strategy: requiredDuringSchedulingIgnoredDuringExecutionStrategy,
pods: addPodsToNode(nodeWithoutLabels, &metav1.Time{}),
nodes: []*v1.Node{nodeWithoutLabels, nodeWithLabels},
maxNoOfPodsToEvictPerNamespace: &uint1,
maxPodsToEvictPerNode: 1,
},
{
description: "Pod is scheduled on node without matching labels, but no node where pod fits is available, should not evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithoutLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: 0,
},
{
description: "Pod is scheduled on node without matching labels, and node where pod fits is available, should evict",
expectedEvictedPodCount: 0,
strategy: requiredDuringSchedulingIgnoredDuringExecutionWithNodeFitStrategy,
pods: addPodsToNode(nodeWithoutLabels, nil),
pods: addPodsToNode(nodeWithoutLabels),
nodes: []*v1.Node{nodeWithLabels, unschedulableNodeWithLabels},
maxPodsToEvictPerNode: &uint1,
maxPodsToEvictPerNode: 1,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
false,
false,
false,
)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
false,
)
RemovePodsViolatingNodeAffinity(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}
}

View File

@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -49,18 +48,18 @@ func validateRemovePodsViolatingNodeTaintsParams(params *api.StrategyParameters)
}
// RemovePodsViolatingNodeTaints evicts pods that do not tolerate the NoSchedule taints on their nodes
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validateRemovePodsViolatingNodeTaintsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingNodeTaints parameters")
return
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces []string
var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
labelSelector = strategy.Params.LabelSelector
}
@@ -78,20 +77,17 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(labelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(labelSelector),
)
if err != nil {
// no pods evicted as an error was encountered retrieving evictable Pods
return
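The violation checked by this strategy is the ordinary taint/toleration match. A standalone sketch using the upstream helper on v1.Toleration; the taint key and value mirror the test fixtures below, and the toleration is invented to match them:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A NoSchedule taint shaped like the ones added to the test nodes below.
	taint := v1.Taint{Key: "testTaint", Value: "test", Effect: v1.TaintEffectNoSchedule}

	// A pod carrying this toleration is left alone; a pod on the tainted node
	// without a matching toleration becomes an eviction candidate.
	toleration := v1.Toleration{
		Key:      "testTaint",
		Operator: v1.TolerationOpEqual,
		Value:    "test",
		Effect:   v1.TaintEffectNoSchedule,
	}

	fmt.Println(toleration.ToleratesTaint(&taint)) // true
}
```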

View File

@@ -9,12 +9,10 @@ import (
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
@@ -47,10 +45,11 @@ func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
}
func TestDeletePodsViolatingNodeTaints(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node1 = addTaintsToNode(node1, "testTaint", "test", []int{1})
node2 := test.BuildTestNode("n2", 2000, 3000, 10, nil)
node2 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
node1 = addTaintsToNode(node2, "testingTaint", "testing", []int{1})
node3 := test.BuildTestNode("n3", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
@@ -117,168 +116,144 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
"datacenter": "west",
}
var uint1 uint = 1
tests := []struct {
description string
nodes []*v1.Node
pods []*v1.Pod
evictLocalStoragePods bool
evictSystemCriticalPods bool
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
expectedEvictedPodCount uint
nodeFit bool
description string
nodes []*v1.Node
pods []v1.Pod
evictLocalStoragePods bool
evictSystemCriticalPods bool
maxPodsToEvictPerNode int
expectedEvictedPodCount int
nodeFit bool
}{
{
description: "Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p2 gets evicted
},
{
description: "Pods with tolerations but not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p3, p4},
pods: []v1.Pod{*p1, *p3, *p4},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p4 gets evicted
},
{
description: "Only <maxPodsToEvictPerNode> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
pods: []v1.Pod{*p1, *p5, *p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: &uint1,
maxPodsToEvictPerNode: 1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Only <maxNoOfPodsToEvictPerNamespace> number of Pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p1, p5, p6},
nodes: []*v1.Node{node1},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxNoOfPodsToEvictPerNamespace: &uint1,
expectedEvictedPodCount: 1, //p5 or p6 gets evicted
},
{
description: "Critical pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{p7, p8, p9, p10},
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //nothing is evicted
},
{
description: "Critical pods except storage pods not tolerating node taint should not be evicted",
pods: []*v1.Pod{p7, p8, p9, p10},
pods: []v1.Pod{*p7, *p8, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: true,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p9 gets evicted
},
{
description: "Critical and non critical pods, only non critical pods not tolerating node taint should be evicted",
pods: []*v1.Pod{p7, p8, p10, p11},
pods: []v1.Pod{*p7, *p8, *p10, *p11},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 1, //p11 gets evicted
},
{
description: "Critical and non critical pods, pods not tolerating node taint should be evicted even if they are critical",
pods: []*v1.Pod{p2, p7, p9, p10},
nodes: []*v1.Node{node1, node2},
pods: []v1.Pod{*p2, *p7, *p9, *p10},
nodes: []*v1.Node{node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: true,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 2, //p2 and p7 are evicted
},
{
description: "Pod p2 doesn't tolerate taint on it's node, but also doesn't tolerate taints on other nodes",
pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node2},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //nothing is evicted; p2 doesn't fit on the other node either
nodeFit: true,
},
{
description: "Pod p12 doesn't tolerate taint on it's node, but other nodes don't match it's selector",
pods: []*v1.Pod{p1, p3, p12},
pods: []v1.Pod{*p1, *p3, *p12},
nodes: []*v1.Node{node1, node3},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //nothing is evicted; no other node matches p12's selector
nodeFit: true,
},
{
description: "Pod p2 doesn't tolerate taint on it's node, but other nodes are unschedulable",
pods: []*v1.Pod{p1, p2, p3},
pods: []v1.Pod{*p1, *p2, *p3},
nodes: []*v1.Node{node1, node4},
evictLocalStoragePods: false,
evictSystemCriticalPods: false,
maxPodsToEvictPerNode: 0,
expectedEvictedPodCount: 0, //nothing is evicted; the other node is unschedulable
nodeFit: true,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: tc.nodeFit,
},
}
RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
tc.evictLocalStoragePods,
tc.evictSystemCriticalPods,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: tc.nodeFit,
},
}
RemovePodsViolatingNodeTaints(ctx, fakeClient, strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, Unexpected no of pods evicted: pods evicted: %d, expected: %d", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
}
}
func TestToleratesTaint(t *testing.T) {

View File

@@ -24,17 +24,15 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
// HighNodeUtilization evicts pods from underutilized nodes so that the scheduler can schedule according to its strategy.
// Note that CPU/Memory requests are used to calculate nodes' utilization and not the actual resource usage.
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func HighNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validateNodeUtilizationParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid HighNodeUtilization parameters")
return
@@ -63,7 +61,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
resourceNames := getResourceNames(targetThresholds)
sourceNodes, highNodes := classifyNodes(
getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
func(node *v1.Node, usage NodeUsage) bool {
return isNodeWithLowUtilization(usage)
},
@@ -94,8 +92,8 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
klog.V(1).InfoS("No node is underutilized, nothing to do here, you might tune your thresholds further")
return
}
if len(sourceNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
if len(sourceNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(sourceNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}
if len(sourceNodes) == len(nodes) {
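As the function comment above notes, utilization here is derived from pod resource requests rather than live usage. A back-of-the-envelope sketch of that calculation; the request and allocatable numbers are invented but shaped like the test fixtures below (400m pods on a 4000m node):

```go
package main

import "fmt"

// requestsPercentage sketches the requests-based utilization the
// nodeutilization strategies work with: summed pod requests over node allocatable.
func requestsPercentage(requestsMilli []int64, allocatableMilli int64) float64 {
	var sum int64
	for _, r := range requestsMilli {
		sum += r
	}
	return float64(sum) / float64(allocatableMilli) * 100
}

func main() {
	// Three pods requesting 400m CPU each on a node with 4000m allocatable.
	pct := requestsPercentage([]int64{400, 400, 400}, 4000)
	fmt.Printf("%.0f%% of CPU requested\n", pct) // 30, which is then compared against the configured thresholds
}
```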

View File

@@ -19,24 +19,23 @@ package nodeutilization
import (
"context"
"fmt"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestHighNodeUtilization(t *testing.T) {
ctx := context.Background()
n1NodeName := "n1"
n2NodeName := "n2"
n3NodeName := "n3"
@@ -45,12 +44,13 @@ func TestHighNodeUtilization(t *testing.T) {
nodeSelectorValue := "west"
testCases := []struct {
name string
thresholds api.ResourceThresholds
nodes []*v1.Node
pods []*v1.Pod
expectedPodsEvicted uint
evictedPods []string
name string
thresholds api.ResourceThresholds
nodes map[string]*v1.Node
pods map[string]*v1.PodList
maxPodsToEvictPerNode int
expectedPodsEvicted int
evictedPods []string
}{
{
name: "no node below threshold usage",
@@ -58,26 +58,39 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20,
v1.ResourcePods: 20,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
// These won't be evicted.
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
},
expectedPodsEvicted: 0,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 0,
},
{
name: "no evictable pods",
@@ -85,44 +98,57 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 40,
v1.ResourcePods: 40,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
// These won't be evicted.
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A pod with local storage.
test.SetNormalOwnerRef(pod)
pod.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
// A Mirror Pod.
pod.Annotations = test.GetMirrorPodAnnotation()
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetDSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetDSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
},
expectedPodsEvicted: 0,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 0,
},
{
name: "no node to schedule evicted pods",
@@ -130,21 +156,34 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20,
v1.ResourcePods: 20,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
// These can't be evicted.
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These can't be evicted.
test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
// These can't be evicted.
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
// These can't be evicted.
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n3NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
},
expectedPodsEvicted: 0,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 0,
},
{
name: "without priorities",
@@ -152,29 +191,42 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
// A Critical Pod.
pod.Namespace = "kube-system"
priority := utils.SystemCriticalPriority
pod.Spec.Priority = &priority
}),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p7", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
},
expectedPodsEvicted: 2,
evictedPods: []string{"p1", "p7"},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 2,
evictedPods: []string{"p1", "p7"},
},
{
name: "without priorities stop when resource capacity is depleted",
@@ -182,79 +234,117 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 2000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p2", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p6", 400, 0, n3NodeName, test.SetRSOwnerRef),
},
},
},
expectedPodsEvicted: 1,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 1,
},
{
name: "with priorities",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 2000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 2000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
// These won't be evicted.
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
// These won't be evicted.
test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, lowPriority)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodPriority(pod, highPriority)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p7", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p8", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p9", 400, 0, n3NodeName, test.SetDSOwnerRef),
},
},
},
expectedPodsEvicted: 1,
evictedPods: []string{"p1"},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 1,
evictedPods: []string{"p1"},
},
{
name: "without priorities evicting best-effort pods only",
thresholds: api.ResourceThresholds{
v1.ResourceCPU: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil),
test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil),
test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 3000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 3000, 3000, 5, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 3000, 3000, 10, test.SetNodeUnschedulable),
},
// All pods are assumed to be burstable (test.BuildTestPod always sets both cpu/memory resource requests to some value)
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
// These won't be evicted.
test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.MakeBestEffortPod(pod)
}),
*test.BuildTestPod("p2", 400, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
// These won't be evicted.
*test.BuildTestPod("p3", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 400, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 400, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
},
expectedPodsEvicted: 1,
evictedPods: []string{"p1"},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 1,
evictedPods: []string{"p1"},
},
{
name: "with extended resource",
@@ -262,44 +352,60 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 20,
extendedResource: 40,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
// These won't be evicted
test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
// These won't be evicted
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetDSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p4", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p5", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p6", 500, 0, n2NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
},
expectedPodsEvicted: 2,
evictedPods: []string{"p1", "p2"},
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 2,
evictedPods: []string{"p1", "p2"},
},
{
name: "with extended resource in some of nodes",
@@ -307,29 +413,41 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 40,
extendedResource: 40,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 10, func(node *v1.Node) {
test.SetNodeExtendedResource(node, extendedResource, 8)
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n3NodeName: test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
},
pods: []*v1.Pod{
//These won't be evicted
test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
//These won't be evicted
*test.BuildTestPod("p1", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
*test.BuildTestPod("p2", 100, 0, n1NodeName, func(pod *v1.Pod) {
test.SetRSOwnerRef(pod)
test.SetPodExtendedResourceRequest(pod, extendedResource, 1)
}),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p3", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p5", 500, 0, n2NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p6", 500, 0, n2NodeName, test.SetRSOwnerRef),
},
},
n3NodeName: {
Items: []v1.Pod{},
},
},
expectedPodsEvicted: 0,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 0,
},
{
name: "Other node match pod node selector",
@@ -337,28 +455,37 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
},
},
expectedPodsEvicted: 1,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 1,
},
{
name: "Other node does not match pod node selector",
@@ -366,56 +493,67 @@ func TestHighNodeUtilization(t *testing.T) {
v1.ResourceCPU: 30,
v1.ResourcePods: 30,
},
nodes: []*v1.Node{
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
nodes: map[string]*v1.Node{
n1NodeName: test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
n2NodeName: test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
},
pods: []*v1.Pod{
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
pods: map[string]*v1.PodList{
n1NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
*test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetDSOwnerRef),
},
},
n2NodeName: {
Items: []v1.Pod{
*test.BuildTestPod("p5", 400, 0, n2NodeName, func(pod *v1.Pod) {
// A pod selecting nodes in the "west" datacenter
test.SetRSOwnerRef(pod)
pod.Spec.NodeSelector = map[string]string{
nodeSelectorKey: nodeSelectorValue,
}
}),
},
},
},
expectedPodsEvicted: 0,
maxPodsToEvictPerNode: 0,
expectedPodsEvicted: 0,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range testCase.nodes {
objs = append(objs, node)
}
for _, pod := range testCase.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
list := action.(core.ListAction)
fieldString := list.GetListRestrictions().Fields.String()
if strings.Contains(fieldString, n1NodeName) {
return true, test.pods[n1NodeName], nil
}
if strings.Contains(fieldString, n2NodeName) {
return true, test.pods[n2NodeName], nil
}
if strings.Contains(fieldString, n3NodeName) {
return true, test.pods[n3NodeName], nil
}
return true, nil, fmt.Errorf("Failed to list: %v", list)
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
if node, exists := test.nodes[getAction.GetName()]; exists {
return true, node, nil
}
return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
})
podsForEviction := make(map[string]struct{})
for _, pod := range testCase.evictedPods {
for _, pod := range test.evictedPods {
podsForEviction[pod] = struct{}{}
}
evictionFailed := false
if len(testCase.evictedPods) > 0 {
if len(test.evictedPods) > 0 {
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.CreateAction)
obj := getAction.GetObject()
@@ -430,41 +568,17 @@ func TestHighNodeUtilization(t *testing.T) {
})
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
//fakeClient := &fake.Clientset{}
//fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
// list := action.(core.ListAction)
// fieldString := list.GetListRestrictions().Fields.String()
// if strings.Contains(fieldString, n1NodeName) {
// return true, test.pods[n1NodeName], nil
// }
// if strings.Contains(fieldString, n2NodeName) {
// return true, test.pods[n2NodeName], nil
// }
// if strings.Contains(fieldString, n3NodeName) {
// return true, test.pods[n3NodeName], nil
// }
// return true, nil, fmt.Errorf("Failed to list: %v", list)
//})
//fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// getAction := action.(core.GetAction)
// if node, exists := testCase.nodes[getAction.GetName()]; exists {
// return true, node, nil
// }
// return true, nil, fmt.Errorf("Wrong node: %v", getAction.GetName())
//})
var nodes []*v1.Node
for _, node := range test.nodes {
nodes = append(nodes, node)
}
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
nil,
nil,
testCase.nodes,
false,
false,
test.maxPodsToEvictPerNode,
nodes,
false,
false,
false,
@@ -474,16 +588,16 @@ func TestHighNodeUtilization(t *testing.T) {
Enabled: true,
Params: &api.StrategyParameters{
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
Thresholds: testCase.thresholds,
Thresholds: test.thresholds,
},
NodeFit: true,
},
}
HighNodeUtilization(ctx, fakeClient, strategy, testCase.nodes, podEvictor, getPodsAssignedToNode)
HighNodeUtilization(ctx, fakeClient, strategy, nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if testCase.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %v pods to be evicted but %v got evicted", testCase.expectedPodsEvicted, podsEvicted)
if test.expectedPodsEvicted != podsEvicted {
t.Errorf("Expected %#v pods to be evicted but %#v got evicted", test.expectedPodsEvicted, podsEvicted)
}
if evictionFailed {
t.Errorf("Pod evictions failed unexpectedly")
@@ -560,6 +674,7 @@ func TestValidateHighNodeUtilizationStrategyConfig(t *testing.T) {
}
func TestHighNodeUtilizationWithTaints(t *testing.T) {
ctx := context.Background()
strategy := api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
@@ -595,7 +710,7 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
name string
nodes []*v1.Node
pods []*v1.Pod
evictionsExpected uint
evictionsExpected int
}{
{
name: "No taints",
@@ -637,9 +752,6 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
for _, item := range tests {
t.Run(item.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range item.nodes {
objs = append(objs, node)
@@ -650,32 +762,19 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
"policy/v1",
false,
&item.evictionsExpected,
nil,
item.evictionsExpected,
item.nodes,
false,
false,
false,
false,
false,
)
HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor, getPodsAssignedToNode)
HighNodeUtilization(ctx, fakeClient, strategy, item.nodes, podEvictor)
if item.evictionsExpected != podEvictor.TotalEvicted() {
t.Errorf("Expected %v evictions, got %v", item.evictionsExpected, podEvictor.TotalEvicted())

View File

@@ -19,7 +19,6 @@ package nodeutilization
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
@@ -28,13 +27,12 @@ import (
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
)
// LowNodeUtilization evicts pods from overutilized nodes so they can be rescheduled onto underutilized nodes. Note that CPU/memory
// requests (not actual resource usage) are used to calculate node utilization.
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func LowNodeUtilization(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
// TODO: May be create a struct for the strategy as well, so that we don't have to pass along the all the params?
if err := validateNodeUtilizationParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid LowNodeUtilization parameters")
@@ -73,7 +71,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
resourceNames := getResourceNames(thresholds)
lowNodes, sourceNodes := classifyNodes(
getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
getNodeUsage(ctx, client, nodes, thresholds, targetThresholds, resourceNames),
// The node has to be schedulable (to be able to move workload there)
func(node *v1.Node, usage NodeUsage) bool {
if nodeutil.IsNodeUnschedulable(node) {
@@ -120,8 +118,8 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
return
}
if len(lowNodes) <= strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
if len(lowNodes) < strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes {
klog.V(1).InfoS("Number of nodes underutilized is less than NumberOfNodes, nothing to do here", "underutilizedNodes", len(lowNodes), "numberOfNodes", strategy.Params.NodeResourceUtilizationThresholds.NumberOfNodes)
return
}

View File

@@ -19,16 +19,15 @@ package nodeutilization
import (
"context"
"fmt"
"sort"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sort"
)
// NodeUsage stores a node's info, pods on it, thresholds and its resource usage
@@ -78,15 +77,16 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
}
func getNodeUsage(
ctx context.Context,
client clientset.Interface,
nodes []*v1.Node,
lowThreshold, highThreshold api.ResourceThresholds,
resourceNames []v1.ResourceName,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) []NodeUsage {
var nodeUsageList []NodeUsage
for _, node := range nodes {
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
pods, err := podutil.ListPodsOnANode(ctx, client, node)
if err != nil {
klog.V(2).InfoS("Node will not be processed, error accessing its pods", "node", klog.KObj(node), "err", err)
continue

View File

@@ -20,7 +20,6 @@ import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -49,18 +48,18 @@ func validateRemovePodsViolatingInterPodAntiAffinityParams(params *api.StrategyP
}
// RemovePodsViolatingInterPodAntiAffinity evicts pods on the node that violate inter-pod anti-affinity rules.
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validateRemovePodsViolatingInterPodAntiAffinityParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingInterPodAntiAffinity parameters")
return
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces []string
var labelSelector *metav1.LabelSelector
if strategy.Params != nil {
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
labelSelector = strategy.Params.LabelSelector
}
@@ -78,19 +77,16 @@ func RemovePodsViolatingInterPodAntiAffinity(ctx context.Context, client clients
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(labelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(labelSelector),
)
if err != nil {
return
}
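The loop above lists pods through a prebuilt filter instead of per-call options. A small sketch of that builder chain, assuming NewOptions, WithNamespaces, WithoutNamespaces, WithLabelSelector, and BuildFilterFunc keep the signatures shown in this hunk; the helper name and package are hypothetical.

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"

	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
)

// buildNamespaceAndLabelFilter composes the same option chain the strategy uses:
// namespace include/exclude sets plus an optional label selector. As in the strategy,
// at most one of the two namespace lists is expected to be non-empty.
func buildNamespaceAndLabelFilter(included, excluded []string, selector *metav1.LabelSelector) (podutil.FilterFunc, error) {
	podFilter, err := podutil.NewOptions().
		WithNamespaces(sets.NewString(included...)).
		WithoutNamespaces(sets.NewString(excluded...)).
		WithLabelSelector(selector).
		BuildFilterFunc()
	if err != nil {
		return nil, fmt.Errorf("error initializing pod filter function: %v", err)
	}
	// The result can be handed to podutil.ListPodsOnANode together with a
	// GetPodsAssignedToNodeFunc, exactly as the loop above does.
	return podFilter, nil
}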

View File

@@ -24,17 +24,16 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/utils"
"sigs.k8s.io/descheduler/test"
)
func TestPodAntiAffinity(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("n2", 2000, 3000, 10, func(node *v1.Node) {
node.ObjectMeta.Labels = map[string]string{
@@ -56,10 +55,6 @@ func TestPodAntiAffinity(t *testing.T) {
p6 := test.BuildTestPod("p6", 100, 0, node1.Name, nil)
p7 := test.BuildTestPod("p7", 100, 0, node1.Name, nil)
p8 := test.BuildTestPod("p8", 100, 0, node1.Name, nil)
p9 := test.BuildTestPod("p9", 100, 0, node1.Name, nil)
p10 := test.BuildTestPod("p10", 100, 0, node1.Name, nil)
p9.DeletionTimestamp = &metav1.Time{}
p10.DeletionTimestamp = &metav1.Time{}
criticalPriority := utils.SystemCriticalPriority
nonEvictablePod := test.BuildTestPod("non-evict", 100, 0, node1.Name, func(pod *v1.Pod) {
@@ -77,8 +72,6 @@ func TestPodAntiAffinity(t *testing.T) {
test.SetNormalOwnerRef(p5)
test.SetNormalOwnerRef(p6)
test.SetNormalOwnerRef(p7)
test.SetNormalOwnerRef(p9)
test.SetNormalOwnerRef(p10)
// set pod anti affinity
setPodAntiAffinity(p1, "foo", "bar")
@@ -87,8 +80,6 @@ func TestPodAntiAffinity(t *testing.T) {
setPodAntiAffinity(p5, "foo1", "bar1")
setPodAntiAffinity(p6, "foo1", "bar1")
setPodAntiAffinity(p7, "foo", "bar")
setPodAntiAffinity(p9, "foo", "bar")
setPodAntiAffinity(p10, "foo", "bar")
// set pod priority
test.SetPodPriority(p5, 100)
@@ -100,133 +91,98 @@ func TestPodAntiAffinity(t *testing.T) {
"datacenter": "west",
}
var uint1 uint = 1
var uint3 uint = 3
tests := []struct {
description string
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
pods []*v1.Pod
expectedEvictedPodCount uint
nodeFit bool
nodes []*v1.Node
description string
maxPodsToEvictPerNode int
pods []v1.Pod
expectedEvictedPodCount int
nodeFit bool
nodes []*v1.Node
}{
{
description: "Maximum pods to evict - 0",
pods: []*v1.Pod{p1, p2, p3, p4},
maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict - 3",
maxPodsToEvictPerNode: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
maxPodsToEvictPerNode: 3,
pods: []v1.Pod{*p1, *p2, *p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Maximum pods to evict (maxPodsToEvictPerNamespace=3) - 3",
maxNoOfPodsToEvictPerNamespace: &uint3,
pods: []*v1.Pod{p1, p2, p3, p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
},
{
description: "Evict only 1 pod after sorting",
pods: []*v1.Pod{p5, p6, p7},
maxPodsToEvictPerNode: 0,
pods: []v1.Pod{*p5, *p6, *p7},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
maxPodsToEvictPerNode: 1,
pods: []v1.Pod{*p1, *nonEvictablePod},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Evicts pod that conflicts with critical pod (but does not evict critical pod)",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p1, nonEvictablePod},
maxPodsToEvictPerNode: 1,
pods: []v1.Pod{*p1, *nonEvictablePod},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "Won't evict pods because node selectors don't match available nodes",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
maxPodsToEvictPerNode: 1,
pods: []v1.Pod{*p8, *nonEvictablePod},
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "Won't evict pods because only other node is not schedulable",
maxPodsToEvictPerNode: &uint1,
pods: []*v1.Pod{p8, nonEvictablePod},
maxPodsToEvictPerNode: 1,
pods: []v1.Pod{*p8, *nonEvictablePod},
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
nodeFit: true,
},
{
description: "No pod to evicted since all pod terminating",
pods: []*v1.Pod{p9, p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range test.nodes {
objs = append(objs, node)
}
for _, pod := range test.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.maxNoOfPodsToEvictPerNamespace,
test.nodes,
false,
false,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: test.nodeFit,
},
}
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
}
// create fake client
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: test.pods}, nil
})
fakeClient.Fake.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
return true, node1, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
test.maxPodsToEvictPerNode,
test.nodes,
false,
false,
false,
)
strategy := api.DeschedulerStrategy{
Params: &api.StrategyParameters{
NodeFit: test.nodeFit,
},
}
RemovePodsViolatingInterPodAntiAffinity(ctx, fakeClient, strategy, test.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != test.expectedEvictedPodCount {
t.Errorf("Unexpected no of pods evicted: pods evicted: %d, expected: %d", podsEvicted, test.expectedEvictedPodCount)
}
}
}
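One side of the test above answers every pod list from reactors keyed by node name. A sketch of that pattern, reusing only the fake clientset and reactor calls visible in this diff; the constructor name and package are invented for the example.

package sketch

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// newClientWithPodListReactor answers every "list pods" call from a fixed map keyed
// by node name, by inspecting the field selector of the list request. Node names that
// are substrings of each other would need a stricter match than strings.Contains.
func newClientWithPodListReactor(podsByNode map[string]*v1.PodList) *fake.Clientset {
	fakeClient := &fake.Clientset{}
	fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
		list := action.(core.ListAction)
		fieldString := list.GetListRestrictions().Fields.String()
		for nodeName, pods := range podsByNode {
			if strings.Contains(fieldString, nodeName) {
				return true, pods, nil
			}
		}
		return true, nil, fmt.Errorf("failed to list: %v", list)
	})
	return fakeClient
}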

View File

@@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@@ -57,7 +56,7 @@ func validatePodLifeTimeParams(params *api.StrategyParameters) error {
}
// PodLifeTime evicts pods that were created more than strategy.Params.MaxPodLifeTimeSeconds seconds ago.
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validatePodLifeTimeParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid PodLifeTime parameters")
return
@@ -69,10 +68,10 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
return
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces []string
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority))
@@ -89,21 +88,10 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
}
}
podFilter, err := podutil.NewOptions().
WithFilter(filter).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods := listOldPodsOnNode(node.Name, getPodsAssignedToNode, podFilter, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds)
pods := listOldPodsOnNode(ctx, client, node, includedNamespaces, excludedNamespaces, strategy.Params.LabelSelector, *strategy.Params.PodLifeTime.MaxPodLifeTimeSeconds, filter)
for _, pod := range pods {
success, err := podEvictor.EvictPod(ctx, pod, node, "PodLifeTime")
if success {
@@ -120,12 +108,23 @@ func PodLifeTime(ctx context.Context, client clientset.Interface, strategy api.D
}
func listOldPodsOnNode(
nodeName string,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
filter podutil.FilterFunc,
ctx context.Context,
client clientset.Interface,
node *v1.Node,
includedNamespaces, excludedNamespaces []string,
labelSelector *metav1.LabelSelector,
maxPodLifeTimeSeconds uint,
filter func(pod *v1.Pod) bool,
) []*v1.Pod {
pods, err := podutil.ListPodsOnANode(nodeName, getPodsAssignedToNode, filter)
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(filter),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(labelSelector),
)
if err != nil {
return nil
}
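listOldPodsOnNode pairs the generic pod listing with an age cutoff that is not shown in this hunk, so the following is only an illustration of such a cutoff (olderThan and its package are invented names), shaped to match the func(pod *v1.Pod) bool filter used above.

package sketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
)

// olderThan returns a filter that keeps only pods created more than
// maxPodLifeTimeSeconds before now. It matches the filter shape used above,
// but it is an illustration, not the repository's helper.
func olderThan(maxPodLifeTimeSeconds uint, now time.Time) func(pod *v1.Pod) bool {
	return func(pod *v1.Pod) bool {
		podAge := now.Sub(pod.ObjectMeta.CreationTimestamp.Time)
		return podAge.Seconds() > float64(maxPodLifeTimeSeconds)
	}
}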

View File

@@ -25,16 +25,15 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
func TestPodLifeTime(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("n1", 2000, 3000, 10, nil)
olderPodCreationTime := metav1.NewTime(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC))
newerPodCreationTime := metav1.NewTime(time.Now())
@@ -126,24 +125,14 @@ func TestPodLifeTime(t *testing.T) {
p12.ObjectMeta.OwnerReferences = ownerRef1
p13.ObjectMeta.OwnerReferences = ownerRef1
p14 := test.BuildTestPod("p14", 100, 0, node1.Name, nil)
p15 := test.BuildTestPod("p15", 100, 0, node1.Name, nil)
p14.Namespace = "dev"
p15.Namespace = "dev"
p14.ObjectMeta.CreationTimestamp = olderPodCreationTime
p15.ObjectMeta.CreationTimestamp = olderPodCreationTime
p14.ObjectMeta.OwnerReferences = ownerRef1
p15.ObjectMeta.OwnerReferences = ownerRef1
p14.DeletionTimestamp = &metav1.Time{}
p15.DeletionTimestamp = &metav1.Time{}
var maxLifeTime uint = 600
testCases := []struct {
description string
strategy api.DeschedulerStrategy
pods []*v1.Pod
maxPodsToEvictPerNode int
pods []v1.Pod
nodes []*v1.Node
expectedEvictedPodCount uint
expectedEvictedPodCount int
ignorePvcPods bool
}{
{
@@ -154,7 +143,8 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []*v1.Pod{p1, p2},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p1, *p2},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -166,7 +156,8 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []*v1.Pod{p3, p4},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p3, *p4},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
@@ -178,7 +169,8 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []*v1.Pod{p5, p6},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p5, *p6},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -190,7 +182,8 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []*v1.Pod{p7, p8},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p7, *p8},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
@@ -205,7 +198,8 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
pods: []*v1.Pod{p9, p10},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p9, *p10},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
@@ -217,7 +211,8 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []*v1.Pod{p11},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
ignorePvcPods: true,
@@ -230,12 +225,13 @@ func TestPodLifeTime(t *testing.T) {
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
},
},
pods: []*v1.Pod{p11},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p11},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "No pod to evicted since all pod terminating",
description: "Two old pods with different labels, 1 selected by labelSelector",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
@@ -245,71 +241,36 @@ func TestPodLifeTime(t *testing.T) {
},
},
},
pods: []*v1.Pod{p12, p13},
maxPodsToEvictPerNode: 5,
pods: []v1.Pod{*p12, *p13},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
},
{
description: "No pod should be evicted since pod terminating",
strategy: api.DeschedulerStrategy{
Enabled: true,
Params: &api.StrategyParameters{
PodLifeTime: &api.PodLifeTime{MaxPodLifeTimeSeconds: &maxLifeTime},
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
},
},
pods: []*v1.Pod{p14, p15},
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
fakeClient := &fake.Clientset{}
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
nil,
nil,
tc.nodes,
false,
false,
tc.ignorePvcPods,
false,
false,
)
PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: tc.pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
tc.ignorePvcPods,
)
PodLifeTime(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.description, tc.expectedEvictedPodCount, podsEvicted)
}
}
}

View File

@@ -21,7 +21,6 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@@ -50,7 +49,7 @@ func validateRemovePodsHavingTooManyRestartsParams(params *api.StrategyParameter
// RemovePodsHavingTooManyRestarts removes pods that have too many restarts on a node.
// There are many cases leading to this issue: volume mount failures, app errors due to nodes' different settings.
// As of now, this strategy won't evict daemonsets, mirror pods, critical pods, or pods with local storage.
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc) {
func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor) {
if err := validateRemovePodsHavingTooManyRestartsParams(strategy.Params); err != nil {
klog.ErrorS(err, "Invalid RemovePodsHavingTooManyRestarts parameters")
return
@@ -62,10 +61,10 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
return
}
var includedNamespaces, excludedNamespaces sets.String
var includedNamespaces, excludedNamespaces []string
if strategy.Params.Namespaces != nil {
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
includedNamespaces = strategy.Params.Namespaces.Include
excludedNamespaces = strategy.Params.Namespaces.Exclude
}
nodeFit := false
@@ -75,20 +74,17 @@ func RemovePodsHavingTooManyRestarts(ctx context.Context, client clientset.Inter
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
podFilter, err := podutil.NewOptions().
WithFilter(evictable.IsEvictable).
WithNamespaces(includedNamespaces).
WithoutNamespaces(excludedNamespaces).
WithLabelSelector(strategy.Params.LabelSelector).
BuildFilterFunc()
if err != nil {
klog.ErrorS(err, "Error initializing pod filter function")
return
}
for _, node := range nodes {
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
pods, err := podutil.ListPodsOnANode(
ctx,
client,
node,
podutil.WithFilter(evictable.IsEvictable),
podutil.WithNamespaces(includedNamespaces),
podutil.WithoutNamespaces(excludedNamespaces),
podutil.WithLabelSelector(strategy.Params.LabelSelector),
)
if err != nil {
klog.ErrorS(err, "Error listing a nodes pods", "node", klog.KObj(node))
continue

View File

@@ -18,24 +18,23 @@ package strategies
import (
"context"
"fmt"
"testing"
"fmt"
v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
func initPods(node *v1.Node) []*v1.Pod {
pods := make([]*v1.Pod, 0)
func initPods(node *v1.Node) []v1.Pod {
pods := make([]v1.Pod, 0)
for i := int32(0); i <= 9; i++ {
pod := test.BuildTestPod(fmt.Sprintf("pod-%d", i), 100, 0, node.Name, nil)
@@ -57,7 +56,7 @@ func initPods(node *v1.Node) []*v1.Pod {
},
},
}
pods = append(pods, pod)
pods = append(pods, *pod)
}
// The following 3 pods won't get evicted.
@@ -82,6 +81,8 @@ func initPods(node *v1.Node) []*v1.Pod {
}
func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
ctx := context.Background()
node1 := test.BuildTestNode("node1", 2000, 3000, 10, nil)
node2 := test.BuildTestNode("node2", 2000, 3000, 10, func(node *v1.Node) {
node.Spec.Taints = []v1.Taint{
@@ -113,139 +114,115 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
}
}
var uint3 uint = 3
tests := []struct {
description string
nodes []*v1.Node
strategy api.DeschedulerStrategy
expectedEvictedPodCount uint
maxPodsToEvictPerNode *uint
maxNoOfPodsToEvictPerNamespace *uint
description string
nodes []*v1.Node
strategy api.DeschedulerStrategy
expectedEvictedPodCount int
maxPodsToEvictPerNode int
}{
{
description: "All pods have total restarts under threshold, no pod evictions",
strategy: createStrategy(true, true, 10000, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: 0,
},
{
description: "Some pods have total restarts bigger than threshold",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*25, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 5 pod evictions",
strategy: createStrategy(true, false, 1*25, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 5,
maxPodsToEvictPerNode: 0,
},
{
description: "All pods have total restarts equals threshold(includingInitContainers=true), 6 pod evictions",
strategy: createStrategy(true, true, 1*20, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Nine pods have total restarts equals threshold(includingInitContainers=false), 6 pod evictions",
strategy: createStrategy(true, false, 1*20, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 6,
maxPodsToEvictPerNode: 0,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=true), but only 1 pod eviction",
strategy: createStrategy(true, true, 5*25+1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
maxPodsToEvictPerNode: 0,
},
{
description: "Five pods have total restarts bigger than threshold(includingInitContainers=false), but only 1 pod eviction",
strategy: createStrategy(true, false, 5*20+1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 1,
maxPodsToEvictPerNode: 0,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxPodsToEvictPerNode: &uint3,
},
{
description: "All pods have total restarts equals threshold(maxNoOfPodsToEvictPerNamespace=3), 3 pod evictions",
strategy: createStrategy(true, true, 1, false),
nodes: []*v1.Node{node1},
expectedEvictedPodCount: 3,
maxNoOfPodsToEvictPerNamespace: &uint3,
maxPodsToEvictPerNode: 3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is tained, 0 pod evictions",
strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node2},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
maxPodsToEvictPerNode: 3,
},
{
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node is not schedulable, 0 pod evictions",
strategy: createStrategy(true, true, 1, true),
nodes: []*v1.Node{node1, node3},
expectedEvictedPodCount: 0,
maxPodsToEvictPerNode: &uint3,
maxPodsToEvictPerNode: 3,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range pods {
objs = append(objs, pod)
}
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.maxNoOfPodsToEvictPerNamespace,
tc.nodes,
false,
false,
false,
false,
false,
)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.PodList{Items: pods}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
policyv1.SchemeGroupVersion.String(),
false,
tc.maxPodsToEvictPerNode,
tc.nodes,
false,
false,
false,
)
RemovePodsHavingTooManyRestarts(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test %#v failed, expected %v pod evictions, but got %v pod evictions\n", tc.description, tc.expectedEvictedPodCount, actualEvictedPodCount)
}
}
}
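The thresholds in the cases above (for example 1*20 versus 1*25) assume the strategy sums container restart counts with or without init containers. A hedged sketch of that sum over standard PodStatus fields; totalRestarts is an illustrative name, not the upstream helper.

package sketch

import v1 "k8s.io/api/core/v1"

// totalRestarts sums container restart counts for a pod, optionally including init
// containers - the quantity the restart threshold in the cases above is compared
// against. Illustrative only; the upstream helper may differ.
func totalRestarts(pod *v1.Pod, includingInitContainers bool) int32 {
	var restarts int32
	for _, containerStatus := range pod.Status.ContainerStatuses {
		restarts += containerStatus.RestartCount
	}
	if includingInitContainers {
		for _, containerStatus := range pod.Status.InitContainerStatuses {
			restarts += containerStatus.RestartCount
		}
	}
	return restarts
}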

View File

@@ -22,18 +22,17 @@ import (
"math"
"sort"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
"sigs.k8s.io/descheduler/pkg/utils"
)
@@ -48,24 +47,70 @@ type topology struct {
pods []*v1.Pod
}
// topologySpreadStrategyParams contains validated strategy parameters
type topologySpreadStrategyParams struct {
thresholdPriority int32
includedNamespaces sets.String
excludedNamespaces sets.String
labelSelector labels.Selector
nodeFit bool
}
// validateAndParseTopologySpreadParams will validate parameters to ensure that they do not contain invalid values.
func validateAndParseTopologySpreadParams(ctx context.Context, client clientset.Interface, params *api.StrategyParameters) (*topologySpreadStrategyParams, error) {
var includedNamespaces, excludedNamespaces sets.String
if params == nil {
return &topologySpreadStrategyParams{includedNamespaces: includedNamespaces, excludedNamespaces: excludedNamespaces}, nil
}
// At most one of include/exclude can be set
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return nil, fmt.Errorf("only one of thresholdPriority and thresholdPriorityClassName can be set")
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
if err != nil {
return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
}
if params.Namespaces != nil {
includedNamespaces = sets.NewString(params.Namespaces.Include...)
excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
}
var selector labels.Selector
if params.LabelSelector != nil {
selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
if err != nil {
return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return &topologySpreadStrategyParams{
thresholdPriority: thresholdPriority,
includedNamespaces: includedNamespaces,
excludedNamespaces: excludedNamespaces,
labelSelector: selector,
nodeFit: params.NodeFit,
}, nil
}
func RemovePodsViolatingTopologySpreadConstraint(
ctx context.Context,
client clientset.Interface,
strategy api.DeschedulerStrategy,
nodes []*v1.Node,
podEvictor *evictions.PodEvictor,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
strategyParams, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
strategyParams, err := validateAndParseTopologySpreadParams(ctx, client, strategy.Params)
if err != nil {
klog.ErrorS(err, "Invalid RemovePodsViolatingTopologySpreadConstraint parameters")
return
}
evictable := podEvictor.Evictable(
evictions.WithPriorityThreshold(strategyParams.ThresholdPriority),
evictions.WithNodeFit(strategyParams.NodeFit),
evictions.WithLabelSelector(strategyParams.LabelSelector),
evictions.WithPriorityThreshold(strategyParams.thresholdPriority),
evictions.WithNodeFit(strategyParams.nodeFit),
evictions.WithLabelSelector(strategyParams.labelSelector),
)
nodeMap := make(map[string]*v1.Node, len(nodes))
@@ -95,8 +140,8 @@ func RemovePodsViolatingTopologySpreadConstraint(
podsForEviction := make(map[*v1.Pod]struct{})
// 1. for each namespace...
for _, namespace := range namespaces.Items {
if (len(strategyParams.IncludedNamespaces) > 0 && !strategyParams.IncludedNamespaces.Has(namespace.Name)) ||
(len(strategyParams.ExcludedNamespaces) > 0 && strategyParams.ExcludedNamespaces.Has(namespace.Name)) {
if (len(strategyParams.includedNamespaces) > 0 && !strategyParams.includedNamespaces.Has(namespace.Name)) ||
(len(strategyParams.excludedNamespaces) > 0 && strategyParams.excludedNamespaces.Has(namespace.Name)) {
continue
}
namespacePods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
@@ -141,10 +186,6 @@ func RemovePodsViolatingTopologySpreadConstraint(
// (this loop is where we count the number of pods per topologyValue that match this constraint's selector)
var sumPods float64
for i := range namespacePods.Items {
// skip pods that are being deleted.
if utils.IsPodTerminating(&namespacePods.Items[i]) {
continue
}
// 4. if the pod matches this TopologySpreadConstraint LabelSelector
if !selector.Matches(labels.Set(namespacePods.Items[i].Labels)) {
continue
@@ -240,7 +281,7 @@ func balanceDomains(
j := len(sortedDomains) - 1
for i < j {
// if j has no more to give without falling below the ideal average, move to next aboveAvg
if float64(len(sortedDomains[j].pods)) <= idealAvg {
if float64(len(sortedDomains[j].pods)) < idealAvg {
j--
}
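
The only change in this hunk is the comparison operator on the donor check. Whether it reads `<=` or `<` decides if a domain that already holds exactly the ideal average may still give a pod away. A small hedged walk-through with made-up numbers:

```go
package main

import "fmt"

// Made-up boundary case: three topology domains holding 2, 6 and 4 pods,
// so the ideal average is 4 pods per domain. The donor under inspection
// sits exactly at that average.
func main() {
	donorPods := 4
	idealAvg := 4.0

	// "<= idealAvg": the donor has nothing to give without dropping below
	// the average, so the loop moves on to the next candidate donor.
	fmt.Println(float64(donorPods) <= idealAvg) // true -> skip this donor

	// "< idealAvg": the same donor is still drained and ends up at 3 pods,
	// one below the ideal average.
	fmt.Println(float64(donorPods) < idealAvg) // false -> keep draining
}
```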

View File

@@ -5,23 +5,23 @@ import (
"fmt"
"testing"
"sigs.k8s.io/descheduler/pkg/api"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/api"
core "k8s.io/client-go/testing"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/test"
)
func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
testCases := []struct {
name string
pods []*v1.Pod
expectedEvictedCount uint
expectedEvictedCount int
nodes []*v1.Node
strategy api.DeschedulerStrategy
namespaces []string
@@ -588,62 +588,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{IncludeSoftConstraints: true}},
namespaces: []string{"ns1"},
},
{
name: "3 domains size [8 7 0], maxSkew=1, should move 5 to get [5 5 5]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
},
pods: createTestPods([]testPodList{
{
count: 8,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 7,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 5,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "3 domains size [5 5 5], maxSkew=1, should move 0 to retain [5 5 5]",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
test.BuildTestNode("n3", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneC" }),
},
pods: createTestPods([]testPodList{
{
count: 5,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 5,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 5,
node: "n3",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod since pod tolerates the node with taint",
nodes: []*v1.Node{
@@ -832,83 +776,33 @@ func TestTopologySpreadConstraint(t *testing.T) {
},
namespaces: []string{"ns1"},
},
{
name: "2 domains, sizes [4,2], maxSkew=1, 2 pods in termination; nothing should be moved",
nodes: []*v1.Node{
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
},
pods: createTestPods([]testPodList{
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
{
count: 2,
node: "n1",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
deletionTimestamp: &metav1.Time{},
},
{
count: 2,
node: "n2",
labels: map[string]string{"foo": "bar"},
constraints: getDefaultTopologyConstraints(1),
},
}),
expectedEvictedCount: 0,
strategy: api.DeschedulerStrategy{
Params: &api.StrategyParameters{
LabelSelector: getLabelSelector("foo", []string{"bar"}, metav1.LabelSelectorOpIn),
},
},
namespaces: []string{"ns1"},
},
}
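
The last test case above expects zero evictions even though node n1 hosts four matching pods and n2 only two. Because two of the pods on n1 carry a deletion timestamp, the counting loop skips them via the `IsPodTerminating` check seen earlier in this diff, so the effective sizes are [2, 2] and the skew is already within bounds. A small hedged arithmetic sketch of that reasoning:

```go
package main

import "fmt"

// Made-up recreation of the "[4,2] with 2 terminating" test case above.
func main() {
	podsOnN1, terminatingOnN1 := 4, 2
	podsOnN2 := 2
	maxSkew := 1

	effectiveN1 := podsOnN1 - terminatingOnN1 // terminating pods are not counted
	skew := effectiveN1 - podsOnN2            // 2 - 2 = 0

	fmt.Println(skew <= maxSkew) // true: constraint satisfied, nothing to evict
}
```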
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.pods {
objs = append(objs, pod)
}
objs = append(objs, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns1"}})
fakeClient := fake.NewSimpleClientset(objs...)
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())
fakeClient := &fake.Clientset{}
fakeClient.Fake.AddReactor("list", "pods", func(action core.Action) (bool, runtime.Object, error) {
podList := make([]v1.Pod, 0, len(tc.pods))
for _, pod := range tc.pods {
podList = append(podList, *pod)
}
return true, &v1.PodList{Items: podList}, nil
})
fakeClient.Fake.AddReactor("list", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.NamespaceList{Items: []v1.Namespace{{ObjectMeta: metav1.ObjectMeta{Name: "ns1", Namespace: "ns1"}}}}, nil
})
podEvictor := evictions.NewPodEvictor(
fakeClient,
"v1",
false,
nil,
nil,
100,
tc.nodes,
false,
false,
false,
false,
false,
)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor, getPodsAssignedToNode)
RemovePodsViolatingTopologySpreadConstraint(ctx, fakeClient, tc.strategy, tc.nodes, podEvictor)
podsEvicted := podEvictor.TotalEvicted()
if podsEvicted != tc.expectedEvictedCount {
t.Errorf("Test error for description: %s. Expected evicted pods count %v, got %v", tc.name, tc.expectedEvictedCount, podsEvicted)
@@ -918,15 +812,14 @@ func TestTopologySpreadConstraint(t *testing.T) {
}
type testPodList struct {
count int
node string
labels map[string]string
constraints []v1.TopologySpreadConstraint
nodeSelector map[string]string
nodeAffinity *v1.Affinity
noOwners bool
tolerations []v1.Toleration
deletionTimestamp *metav1.Time
count int
node string
labels map[string]string
constraints []v1.TopologySpreadConstraint
nodeSelector map[string]string
nodeAffinity *v1.Affinity
noOwners bool
tolerations []v1.Toleration
}
func createTestPods(testPods []testPodList) []*v1.Pod {
@@ -947,7 +840,6 @@ func createTestPods(testPods []testPodList) []*v1.Pod {
p.Spec.NodeSelector = tp.nodeSelector
p.Spec.Affinity = tp.nodeAffinity
p.Spec.Tolerations = tp.tolerations
p.ObjectMeta.DeletionTimestamp = tp.deletionTimestamp
}))
podNum++
}

View File

@@ -1,71 +0,0 @@
package validation
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/utils"
)
// ValidatedStrategyParams contains validated common strategy parameters
type ValidatedStrategyParams struct {
ThresholdPriority int32
IncludedNamespaces sets.String
ExcludedNamespaces sets.String
LabelSelector labels.Selector
NodeFit bool
}
func DefaultValidatedStrategyParams() ValidatedStrategyParams {
return ValidatedStrategyParams{ThresholdPriority: utils.SystemCriticalPriority}
}
func ValidateAndParseStrategyParams(
ctx context.Context,
client clientset.Interface,
params *api.StrategyParameters,
) (*ValidatedStrategyParams, error) {
if params == nil {
defaultValidatedStrategyParams := DefaultValidatedStrategyParams()
return &defaultValidatedStrategyParams, nil
}
// At most one of include/exclude can be set
var includedNamespaces, excludedNamespaces sets.String
if params.Namespaces != nil && len(params.Namespaces.Include) > 0 && len(params.Namespaces.Exclude) > 0 {
return nil, fmt.Errorf("only one of Include/Exclude namespaces can be set")
}
if params.ThresholdPriority != nil && params.ThresholdPriorityClassName != "" {
return nil, fmt.Errorf("only one of ThresholdPriority and thresholdPriorityClassName can be set")
}
thresholdPriority, err := utils.GetPriorityFromStrategyParams(ctx, client, params)
if err != nil {
return nil, fmt.Errorf("failed to get threshold priority from strategy's params: %+v", err)
}
if params.Namespaces != nil {
includedNamespaces = sets.NewString(params.Namespaces.Include...)
excludedNamespaces = sets.NewString(params.Namespaces.Exclude...)
}
var selector labels.Selector
if params.LabelSelector != nil {
selector, err = metav1.LabelSelectorAsSelector(params.LabelSelector)
if err != nil {
return nil, fmt.Errorf("failed to get label selectors from strategy's params: %+v", err)
}
}
return &ValidatedStrategyParams{
ThresholdPriority: thresholdPriority,
IncludedNamespaces: includedNamespaces,
ExcludedNamespaces: excludedNamespaces,
LabelSelector: selector,
NodeFit: params.NodeFit,
}, nil
}
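
For context, a minimal sketch of how a strategy is meant to consume this shared helper, mirroring the call pattern visible earlier in this diff. The package name, function name and the import path of the validation package are illustrative assumptions, not taken from the repository:

```go
package example // hypothetical package, shown only for illustration

import (
	"context"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	"sigs.k8s.io/descheduler/pkg/api"
	"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
	"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation" // assumed import path
)

// exampleStrategy sketches the intended call pattern: validate the common
// parameters once, then wire the exported fields into the evictability filter.
func exampleStrategy(ctx context.Context, client clientset.Interface,
	strategy api.DeschedulerStrategy, podEvictor *evictions.PodEvictor) {
	params, err := validation.ValidateAndParseStrategyParams(ctx, client, strategy.Params)
	if err != nil {
		klog.ErrorS(err, "Invalid strategy parameters")
		return
	}
	evictable := podEvictor.Evictable(
		evictions.WithPriorityThreshold(params.ThresholdPriority),
		evictions.WithNodeFit(params.NodeFit),
		evictions.WithLabelSelector(params.LabelSelector),
	)
	_ = evictable // real strategies pass evictable.IsEvictable into their pod filters
}
```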

View File

@@ -1,79 +0,0 @@
package validation
import (
"context"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"sigs.k8s.io/descheduler/pkg/api"
)
var (
thresholdPriority int32 = 1000
)
func TestValidStrategyParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{name: "validate nil params", params: nil},
{name: "validate empty params", params: &api.StrategyParameters{}},
{name: "validate params with NodeFit", params: &api.StrategyParameters{NodeFit: true}},
{name: "validate params with ThresholdPriority", params: &api.StrategyParameters{ThresholdPriority: &thresholdPriority}},
{name: "validate params with priorityClassName", params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority"}},
{name: "validate params with excluded namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Exclude: []string{"excluded-ns"}}}},
{name: "validate params with included namespace", params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}}}},
{name: "validate params with empty label selector", params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{}}},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
if err != nil {
t.Errorf("strategy params should be valid but got err: %v", err.Error())
}
if params == nil {
t.Errorf("strategy params should return a strategyParams but got nil")
}
})
}
}
func TestInvalidStrategyParams(t *testing.T) {
ctx := context.Background()
fakeClient := &fake.Clientset{}
testCases := []struct {
name string
params *api.StrategyParameters
}{
{
name: "invalid params with both included and excluded namespaces nil params",
params: &api.StrategyParameters{Namespaces: &api.Namespaces{Include: []string{"include-ns"}, Exclude: []string{"exclude-ns"}}},
},
{
name: "invalid params with both threshold priority and priority class name",
params: &api.StrategyParameters{ThresholdPriorityClassName: "high-priority", ThresholdPriority: &thresholdPriority},
},
{
name: "invalid params with bad label selector",
params: &api.StrategyParameters{LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"": "missing-label"}}},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
params, err := ValidateAndParseStrategyParams(ctx, fakeClient, tc.params)
if err == nil {
t.Errorf("strategy params should be invalid but did not get err")
}
if params != nil {
t.Errorf("strategy params should return a nil strategyParams but got %v", params)
}
})
}
}

View File

@@ -89,11 +89,6 @@ func IsMirrorPod(pod *v1.Pod) bool {
return ok
}
// IsPodTerminating returns true if the pod DeletionTimestamp is set.
func IsPodTerminating(pod *v1.Pod) bool {
return pod.DeletionTimestamp != nil
}
// IsStaticPod returns true if the pod is a static pod.
func IsStaticPod(pod *v1.Pod) bool {
source, err := GetPodSource(pod)

View File

@@ -1,203 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
func TestRemoveDuplicates(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
t.Log("Creating duplicates pods")
deploymentObj := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "duplicate-pod",
Namespace: testNamespace.Name,
Labels: map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"},
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}},
},
},
},
}
tests := []struct {
description string
replicasNum int
beforeFunc func(deployment *appsv1.Deployment)
expectedEvictedPodCount uint
}{
{
description: "Evict Pod even Pods schedule to specific node",
replicasNum: 4,
beforeFunc: func(deployment *appsv1.Deployment) {
deployment.Spec.Replicas = func(i int32) *int32 { return &i }(4)
deployment.Spec.Template.Spec.NodeName = workerNodes[0].Name
},
expectedEvictedPodCount: 2,
},
{
description: "Evict Pod even Pods with local storage",
replicasNum: 5,
beforeFunc: func(deployment *appsv1.Deployment) {
deployment.Spec.Replicas = func(i int32) *int32 { return &i }(5)
deployment.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
},
},
}
},
expectedEvictedPodCount: 2,
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
t.Logf("Creating deployment %v in %v namespace", deploymentObj.Name, deploymentObj.Namespace)
tc.beforeFunc(deploymentObj)
_, err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).Create(ctx, deploymentObj, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating deployment: %v", err)
if err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"})).String(),
}); err != nil {
t.Fatalf("Unable to delete deployment: %v", err)
}
return
}
defer clientSet.AppsV1().Deployments(deploymentObj.Namespace).Delete(ctx, deploymentObj.Name, metav1.DeleteOptions{})
waitForPodsRunning(ctx, t, clientSet, map[string]string{"app": "test-duplicate", "name": "test-duplicatePods"}, tc.replicasNum, testNamespace.Name)
// Run DeschedulerStrategy strategy
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group %v", err)
}
podEvictor := evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,
false,
false,
false,
)
t.Log("Running DeschedulerStrategy strategy")
strategies.RemoveDuplicatePods(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
RemoveDuplicates: &deschedulerapi.RemoveDuplicates{},
},
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount != tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Unexpected number of pods have been evicted, got %v, expected %v", tc.description, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
})
}
}
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
}

View File

@@ -1,156 +0,0 @@
package e2e
import (
"context"
"strings"
"testing"
"time"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
var oneHourPodLifetimeSeconds uint = 3600
func TestFailedPods(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, _ := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
testCases := map[string]struct {
expectedEvictedCount uint
strategyParams *deschedulerapi.StrategyParameters
}{
"test-failed-pods-nil-strategy": {
expectedEvictedCount: 1,
strategyParams: nil,
},
"test-failed-pods-default-strategy": {
expectedEvictedCount: 1,
strategyParams: &deschedulerapi.StrategyParameters{},
},
"test-failed-pods-default-failed-pods": {
expectedEvictedCount: 1,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{},
},
},
"test-failed-pods-reason-unmatched": {
expectedEvictedCount: 0,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{Reasons: []string{"ReasonDoesNotMatch"}},
},
},
"test-failed-pods-min-age-unmet": {
expectedEvictedCount: 0,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{MinPodLifetimeSeconds: &oneHourPodLifetimeSeconds},
},
},
"test-failed-pods-exclude-job-kind": {
expectedEvictedCount: 0,
strategyParams: &deschedulerapi.StrategyParameters{
FailedPods: &deschedulerapi.FailedPods{ExcludeOwnerKinds: []string{"Job"}},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
job := initFailedJob(name, testNamespace.Namespace)
t.Logf("Creating job %s in %s namespace", job.Name, job.Namespace)
jobClient := clientSet.BatchV1().Jobs(testNamespace.Name)
if _, err := jobClient.Create(ctx, job, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating Job %s: %v", name, err)
}
deletePropagationPolicy := metav1.DeletePropagationForeground
defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
t.Logf("Running RemoveFailedPods strategy for %s", name)
strategies.RemoveFailedPods(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: tc.strategyParams,
},
nodes,
podEvictor,
getPodsAssignedToNode,
)
t.Logf("Finished RemoveFailedPods strategy for %s", name)
if actualEvictedCount := podEvictor.TotalEvicted(); actualEvictedCount == tc.expectedEvictedCount {
t.Logf("Total of %d Pods were evicted for %s", actualEvictedCount, name)
} else {
t.Errorf("Unexpected number of pods have been evicted, got %v, expected %v", actualEvictedCount, tc.expectedEvictedCount)
}
})
}
}
func initFailedJob(name, namespace string) *batchv1.Job {
podSpec := MakePodSpec("", nil)
podSpec.Containers[0].Command = []string{"/bin/false"}
podSpec.RestartPolicy = v1.RestartPolicyNever
labelsSet := labels.Set{"test": name, "name": name}
jobBackoffLimit := int32(0)
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Labels: labelsSet,
Name: name,
Namespace: namespace,
},
Spec: batchv1.JobSpec{
Template: v1.PodTemplateSpec{
Spec: podSpec,
ObjectMeta: metav1.ObjectMeta{Labels: labelsSet},
},
BackoffLimit: &jobBackoffLimit,
},
}
}
func waitForJobPodPhase(ctx context.Context, t *testing.T, clientSet clientset.Interface, job *batchv1.Job, phase v1.PodPhase) {
podClient := clientSet.CoreV1().Pods(job.Namespace)
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
t.Log(labels.FormatLabels(job.Labels))
if podList, err := podClient.List(ctx, metav1.ListOptions{LabelSelector: labels.FormatLabels(job.Labels)}); err != nil {
return false, err
} else {
if len(podList.Items) == 0 {
t.Logf("Job controller has not created Pod for job %s yet", job.Name)
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != phase {
t.Logf("Pod %v not in %s phase yet, is %v instead", pod.Name, phase, pod.Status.Phase)
return false, nil
}
}
t.Logf("Job %v Pod is in %s phase now", job.Name, phase)
return true, nil
}
}); err != nil {
t.Fatalf("Error waiting for pods in %s phase: %v", phase, err)
}
}

View File

@@ -62,11 +62,11 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
v1.ResourceMemory: resource.MustParse("1000Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceMemory: resource.MustParse("800Mi"),
},
},
}},
@@ -75,7 +75,7 @@ func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
}
}
// RcByNameContainer returns a ReplicationController with specified name and container
// RcByNameContainer returns a ReplicationControoler with specified name and container
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
zeroGracePeriod := int64(0)
@@ -108,7 +108,7 @@ func RcByNameContainer(name, namespace string, replicas int32, labels map[string
}
}
func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, chan struct{}) {
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
@@ -117,41 +117,12 @@ func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInfo
stopChannel := make(chan struct{})
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
waitForNodesReady(context.Background(), t, clientSet, nodeInformer)
return clientSet, nodeInformer, getPodsAssignedToNode, stopChannel
}
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
func waitForNodesReady(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer) {
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
readyNodes, err := nodeutil.ReadyNodes(ctx, clientSet, nodeInformer, "")
if err != nil {
return false, err
}
if len(nodeList.Items) != len(readyNodes) {
t.Logf("%v/%v nodes are ready. Waiting for all nodes to be ready...", len(readyNodes), len(nodeList.Items))
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for nodes to be ready: %v", err)
}
return clientSet, nodeInformer, stopChannel
}
func runPodLifetimeStrategy(
@@ -164,7 +135,6 @@ func runPodLifetimeStrategy(
priority *int32,
evictCritical bool,
labelSelector *metav1.LabelSelector,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
@@ -196,16 +166,12 @@ func runPodLifetimeStrategy(
clientset,
evictionPolicyGroupVersion,
false,
nil,
nil,
0,
nodes,
false,
evictCritical,
false,
false,
false,
),
getPodsAssignedToNode,
)
}
@@ -235,7 +201,7 @@ func intersectStrings(lista, listb []string) []string {
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, _, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -243,7 +209,15 @@ func TestLowNodeUtilization(t *testing.T) {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
var nodes []*v1.Node
var workerNodes []*v1.Node
for i := range nodeList.Items {
node := nodeList.Items[i]
nodes = append(nodes, &node)
if _, exists := node.Labels["node-role.kubernetes.io/master"]; !exists {
workerNodes = append(workerNodes, &node)
}
}
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -324,14 +298,22 @@ func TestLowNodeUtilization(t *testing.T) {
waitForRCPodsRunning(ctx, t, clientSet, rc)
// Run LowNodeUtilization strategy
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("%v", err)
}
podEvictor := evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
0,
nodes,
true,
false,
false,
)
podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
podsOnMosttUtilizedNode, err := podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
@@ -356,17 +338,11 @@ func TestLowNodeUtilization(t *testing.T) {
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}
podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
podsOnMosttUtilizedNode, err = podutil.ListPodsOnANode(ctx, clientSet, workerNodes[0], podutil.WithFilter(podEvictor.Evictable().IsEvictable))
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
@@ -383,7 +359,7 @@ func TestLowNodeUtilization(t *testing.T) {
func TestNamespaceConstraintsInclude(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, nodeInformer, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -418,7 +394,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Include: []string{rc.Namespace},
}, "", nil, false, nil, getPodsAssignedToNode)
}, "", nil, false, nil)
// All pods are supposed to be deleted, wait until all the old pods are deleted
if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
@@ -454,7 +430,7 @@ func TestNamespaceConstraintsInclude(t *testing.T) {
func TestNamespaceConstraintsExclude(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, nodeInformer, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -489,7 +465,7 @@ func TestNamespaceConstraintsExclude(t *testing.T) {
t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Exclude: []string{rc.Namespace},
}, "", nil, false, nil, getPodsAssignedToNode)
}, "", nil, false, nil)
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
@@ -521,7 +497,7 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500)
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, nodeInformer, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -602,9 +578,9 @@ func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
t.Logf("Existing pods: %v", initialPodNames)
if isPriorityClass {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, getPodsAssignedToNode)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil)
} else {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, getPodsAssignedToNode)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil)
}
// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
@@ -651,7 +627,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
var lowPriority = int32(500)
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, nodeInformer, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -721,10 +697,10 @@ func testPriority(t *testing.T, isPriorityClass bool) {
if isPriorityClass {
t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, getPodsAssignedToNode)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil)
} else {
t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, getPodsAssignedToNode)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil)
}
t.Logf("Waiting 10s")
@@ -780,7 +756,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
func TestPodLabelSelector(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, nodeInformer, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
@@ -828,7 +804,7 @@ func TestPodLabelSelector(t *testing.T) {
t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)
t.Logf("set the strategy to delete pods with label test:podlifetime-evict")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}})
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
@@ -883,7 +859,7 @@ func TestPodLabelSelector(t *testing.T) {
func TestEvictAnnotation(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
clientSet, nodeInformer, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
@@ -938,7 +914,7 @@ func TestEvictAnnotation(t *testing.T) {
t.Logf("Existing pods: %v", initialPodNames)
t.Log("Running PodLifetime strategy")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, getPodsAssignedToNode)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil)
if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
@@ -1312,36 +1288,3 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
t.Fatalf("Error waiting for pod running: %v", err)
}
}
func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
var allNodes []*v1.Node
var workerNodes []*v1.Node
for i := range nodes {
node := nodes[i]
allNodes = append(allNodes, &node)
if _, exists := node.Labels["node-role.kubernetes.io/master"]; !exists {
workerNodes = append(workerNodes, &node)
}
}
return allNodes, workerNodes
}
func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*v1.Node) *evictions.PodEvictor {
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group: %v", err)
}
return evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,
false,
false,
false,
)
}

View File

@@ -1,196 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
func TestTooManyRestarts(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Logf("Creating testing namespace %v", t.Name())
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
deploymentObj := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "restart-pod",
Namespace: testNamespace.Name,
Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Spec: appsv1.DeploymentSpec{
Replicas: func(i int32) *int32 { return &i }(4),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Always",
Image: "kubernetes/pause",
Command: []string{"/bin/sh"},
Args: []string{"-c", "sleep 1s && exit 1"},
Ports: []v1.ContainerPort{{ContainerPort: 80}},
}},
},
},
},
}
t.Logf("Creating deployment %v", deploymentObj.Name)
_, err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).Create(ctx, deploymentObj, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating deployment: %v", err)
if err = clientSet.AppsV1().Deployments(deploymentObj.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(),
}); err != nil {
t.Fatalf("Unable to delete deployment: %v", err)
}
return
}
defer clientSet.AppsV1().Deployments(deploymentObj.Namespace).Delete(ctx, deploymentObj.Name, metav1.DeleteOptions{})
// Need to wait restartCount more than 4
result, err := waitPodRestartCount(ctx, clientSet, testNamespace.Name, t)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if !result {
t.Fatal("Pod restart count not as expected")
}
tests := []struct {
name string
podRestartThreshold int32
includingInitContainers bool
expectedEvictedPodCount uint
}{
{
name: "test-no-evictions",
podRestartThreshold: int32(10000),
includingInitContainers: true,
expectedEvictedPodCount: 0,
},
{
name: "test-one-evictions",
podRestartThreshold: int32(4),
includingInitContainers: true,
expectedEvictedPodCount: 4,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group: %v", err)
}
podEvictor := evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
true,
false,
false,
false,
false,
)
// Run RemovePodsHavingTooManyRestarts strategy
t.Log("Running RemovePodsHavingTooManyRestarts strategy")
strategies.RemovePodsHavingTooManyRestarts(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
PodsHavingTooManyRestarts: &deschedulerapi.PodsHavingTooManyRestarts{
PodRestartThreshold: tc.podRestartThreshold,
IncludingInitContainers: tc.includingInitContainers,
},
},
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, testNamespace.Name)
actualEvictedPodCount := podEvictor.TotalEvicted()
if actualEvictedPodCount < tc.expectedEvictedPodCount {
t.Errorf("Test error for description: %s. Unexpected number of pods have been evicted, got %v, expected %v", tc.name, actualEvictedPodCount, tc.expectedEvictedPodCount)
}
})
}
}
func waitPodRestartCount(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) (bool, error) {
timeout := time.After(5 * time.Minute)
tick := time.Tick(5 * time.Second)
for {
select {
case <-timeout:
t.Log("Timeout, still restart count not as expected")
return false, fmt.Errorf("timeout Error")
case <-tick:
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(),
})
if podList.Items[0].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[1].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[2].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[3].Status.ContainerStatuses[0].RestartCount >= 4 {
t.Log("Pod restartCount as expected")
return true, nil
}
if err != nil {
t.Fatalf("Unexpected err: %v", err)
return false, err
}
}
}
}

View File

@@ -1,159 +0,0 @@
package e2e
import (
"context"
"fmt"
"math"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
)
const zoneTopologyKey string = "topology.kubernetes.io/zone"
func TestTopologySpreadConstraint(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
testCases := map[string]struct {
replicaCount int
maxSkew int
labelKey string
labelValue string
constraint v1.UnsatisfiableConstraintAction
}{
"test-rc-topology-spread-hard-constraint": {
replicaCount: 4,
maxSkew: 1,
labelKey: "test",
labelValue: "topology-spread-hard-constraint",
constraint: v1.DoNotSchedule,
},
"test-rc-topology-spread-soft-constraint": {
replicaCount: 4,
maxSkew: 1,
labelKey: "test",
labelValue: "topology-spread-soft-constraint",
constraint: v1.ScheduleAnyway,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
t.Logf("Creating RC %s with %d replicas", name, tc.replicaCount)
rc := RcByNameContainer(name, testNamespace.Name, int32(tc.replicaCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating RC %s %v", name, err)
}
defer deleteRC(ctx, t, clientSet, rc)
waitForRCPodsRunning(ctx, t, clientSet, rc)
// Create a "Violator" RC that has the same label and is forced to be on the same node using a nodeSelector
violatorRcName := name + "-violator"
violatorCount := tc.maxSkew + 1
violatorRc := RcByNameContainer(violatorRcName, testNamespace.Name, int32(violatorCount), map[string]string{tc.labelKey: tc.labelValue}, nil, "")
violatorRc.Spec.Template.Spec.NodeSelector = map[string]string{zoneTopologyKey: workerNodes[0].Labels[zoneTopologyKey]}
rc.Spec.Template.Spec.TopologySpreadConstraints = makeTopologySpreadConstraints(tc.maxSkew, tc.labelKey, tc.labelValue, tc.constraint)
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, violatorRc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating RC %s: %v", violatorRcName, err)
}
defer deleteRC(ctx, t, clientSet, violatorRc)
waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
// Run TopologySpreadConstraint strategy
t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
strategies.RemovePodsViolatingTopologySpreadConstraint(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
IncludeSoftConstraints: tc.constraint != v1.DoNotSchedule,
},
},
nodes,
podEvictor,
getPodsAssignedToNode,
)
t.Logf("Finished RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
t.Logf("Wait for terminating pods of %s to disappear", name)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
if totalEvicted := podEvictor.TotalEvicted(); totalEvicted > 0 {
t.Logf("Total of %d Pods were evicted for %s", totalEvicted, name)
} else {
t.Fatalf("Pods were not evicted for %s TopologySpreadConstraint", name)
}
// Ensure recently evicted Pod are rescheduled and running before asserting for a balanced topology spread
waitForRCPodsRunning(ctx, t, clientSet, rc)
pods, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", tc.labelKey, tc.labelValue)})
if err != nil {
t.Errorf("Error listing pods for %s: %v", name, err)
}
nodePodCountMap := make(map[string]int)
for _, pod := range pods.Items {
nodePodCountMap[pod.Spec.NodeName]++
}
if len(nodePodCountMap) != len(workerNodes) {
t.Errorf("%s Pods were scheduled on only '%d' nodes and were not properly distributed on the nodes", name, len(nodePodCountMap))
}
min, max := getMinAndMaxPodDistribution(nodePodCountMap)
if max-min > tc.maxSkew {
t.Errorf("Pod distribution for %s is still violating the max skew of %d as it is %d", name, tc.maxSkew, max-min)
}
t.Logf("Pods for %s were distributed in line with max skew of %d", name, tc.maxSkew)
})
}
}
func makeTopologySpreadConstraints(maxSkew int, labelKey, labelValue string, constraint v1.UnsatisfiableConstraintAction) []v1.TopologySpreadConstraint {
return []v1.TopologySpreadConstraint{
{
MaxSkew: int32(maxSkew),
TopologyKey: zoneTopologyKey,
WhenUnsatisfiable: constraint,
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{labelKey: labelValue}},
},
}
}
func getMinAndMaxPodDistribution(nodePodCountMap map[string]int) (int, int) {
min := math.MaxInt32
max := math.MinInt32
for _, podCount := range nodePodCountMap {
if podCount < min {
min = podCount
}
if podCount > max {
max = podCount
}
}
return min, max
}
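
The assertion in the test body above combines this helper with the configured `maxSkew`: the spread is acceptable when the fullest and emptiest nodes differ by at most `maxSkew` pods. A short hedged example with made-up pod counts:

```go
package main

import (
	"fmt"
	"math"
)

// Made-up post-descheduling distribution across three worker nodes.
func main() {
	nodePodCountMap := map[string]int{"worker-1": 3, "worker-2": 4, "worker-3": 4}
	maxSkew := 1

	min, max := math.MaxInt32, math.MinInt32
	for _, c := range nodePodCountMap {
		if c < min {
			min = c
		}
		if c > max {
			max = c
		}
	}

	// Mirrors the check in the test above: 4 - 3 = 1 stays within maxSkew.
	fmt.Println(max-min <= maxSkew) // true
}
```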

View File

@@ -1,5 +1,3 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -x
set -o errexit
set -o nounset

View File

@@ -1,5 +1,3 @@
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
K8S_VERSION=${KUBERNETES_VERSION:-v1.23.0}
K8S_VERSION=${KUBERNETES_VERSION:-v1.21.1}
IMAGE_REPO=${HELM_IMAGE_REPO:-descheduler}
IMAGE_TAG=${HELM_IMAGE_TAG:-helm-test}
CHART_LOCATION=${HELM_CHART_LOCATION:-./charts/descheduler}
@@ -29,8 +28,8 @@ mv kind-linux-amd64 kind
export PATH=$PATH:$PWD
kind create cluster --image kindest/node:"${K8S_VERSION}" --config=./hack/kind_config.yaml
kind load docker-image descheduler:helm-test
helm install descheduler-ci --set image.repository="${IMAGE_REPO}",image.tag="${IMAGE_TAG}",schedule="* * * * *" --namespace kube-system "${CHART_LOCATION}"
sleep 61 # sleep until Job is triggered
helm install descheduler-ci --set image.repository="${IMAGE_REPO}",image.tag="${IMAGE_TAG}" --namespace kube-system "${CHART_LOCATION}"
sleep 20s
helm test descheduler-ci --namespace kube-system
# Delete kind cluster once test is finished

View File

@@ -1,5 +1,3 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -x
set -o errexit
set -o nounset

View File

@@ -140,7 +140,7 @@ func testOnGCE() bool {
}()
go func() {
addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
@@ -296,7 +296,6 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) {
// being stable anyway.
host = metadataIP
}
suffix = strings.TrimLeft(suffix, "/")
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, err := http.NewRequest("GET", u, nil)
if err != nil {

View File

@@ -0,0 +1,12 @@
module github.com/Azure/go-autorest/autorest/adal
go 1.12
require (
github.com/Azure/go-autorest v14.2.0+incompatible
github.com/Azure/go-autorest/autorest/date v0.3.0
github.com/Azure/go-autorest/autorest/mocks v0.4.1
github.com/Azure/go-autorest/tracing v0.6.0
github.com/form3tech-oss/jwt-go v3.2.2+incompatible
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)

View File

@@ -0,0 +1,19 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@@ -28,7 +28,6 @@ const (
mimeTypeFormPost = "application/x-www-form-urlencoded"
)
// DO NOT ACCESS THIS DIRECTLY. go through sender()
var defaultSender Sender
var defaultSenderInit = &sync.Once{}

View File

@@ -30,13 +30,11 @@ import (
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/logger"
"github.com/form3tech-oss/jwt-go"
)
@@ -71,22 +69,13 @@ const (
defaultMaxMSIRefreshAttempts = 5
// asMSIEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
msiEndpointEnv = "MSI_ENDPOINT"
asMSIEndpointEnv = "MSI_ENDPOINT"
// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
msiSecretEnv = "MSI_SECRET"
asMSISecretEnv = "MSI_SECRET"
// the API version to use for the legacy App Service MSI endpoint
appServiceAPIVersion2017 = "2017-09-01"
// secret header used when authenticating against app service MSI endpoint
secretHeader = "Secret"
// the format for expires_on in UTC with AM/PM
expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
// the format for expires_on in UTC without AM/PM
expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
// the API version to use for the App Service MSI endpoint
appServiceAPIVersion = "2017-09-01"
)
// OAuthTokenProvider is an interface which should be implemented by an access token retriever
@@ -293,8 +282,6 @@ func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) {
// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension.
type ServicePrincipalMSISecret struct {
msiType msiType
clientResourceID string
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
@@ -665,173 +652,94 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie
)
}
type msiType int
const (
msiTypeUnavailable msiType = iota
msiTypeAppServiceV20170901
msiTypeCloudShell
msiTypeIMDS
)
func (m msiType) String() string {
switch m {
case msiTypeUnavailable:
return "unavailable"
case msiTypeAppServiceV20170901:
return "AppServiceV20170901"
case msiTypeCloudShell:
return "CloudShell"
case msiTypeIMDS:
return "IMDS"
default:
return fmt.Sprintf("unhandled MSI type %d", m)
}
}
// returns the MSI type and endpoint, or an error
func getMSIType() (msiType, string, error) {
if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
// if the env var MSI_ENDPOINT is set
if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
// if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
return msiTypeAppServiceV20170901, endpointEnvVar, nil
}
// if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
return msiTypeCloudShell, endpointEnvVar, nil
} else if msiAvailableHook(context.Background(), sender()) {
// if MSI_ENDPOINT is NOT set AND the IMDS endpoint is available the msiType is IMDS. This will timeout after 500 milliseconds
return msiTypeIMDS, msiEndpoint, nil
} else {
// if MSI_ENDPOINT is NOT set and IMDS endpoint is not available Managed Identity is not available
return msiTypeUnavailable, "", errors.New("MSI not available")
}
}
// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell.
// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
func GetMSIVMEndpoint() (string, error) {
return msiEndpoint, nil
}
// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions.
// It will return an error when not running in an app service/functions environment.
// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
// NOTE: this only indicates if the ASE environment credentials have been set
// which does not necessarily mean that the caller is authenticating via ASE!
func isAppService() bool {
_, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
_, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
return asMSIEndpointEnvExists && asMSISecretEnvExists
}
// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions
func GetMSIAppServiceEndpoint() (string, error) {
msiType, endpoint, err := getMSIType()
if err != nil {
return "", err
}
switch msiType {
case msiTypeAppServiceV20170901:
return endpoint, nil
default:
return "", fmt.Errorf("%s is not app service environment", msiType)
asMSIEndpoint, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
if asMSIEndpointEnvExists {
return asMSIEndpoint, nil
}
return "", errors.New("MSI endpoint not found")
}
// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment
// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
func GetMSIEndpoint() (string, error) {
_, endpoint, err := getMSIType()
return endpoint, err
if isAppService() {
return GetMSIAppServiceEndpoint()
}
return GetMSIVMEndpoint()
}
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
// msiEndpoint - empty string, or pass a non-empty string to override the default value.
// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...)
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...)
}
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the clientID of specified user assigned identity when creating the token.
// msiEndpoint - empty string, or pass a non-empty string to override the default value.
// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil {
return nil, err
}
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...)
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...)
}
// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the azure resource id of user assigned identity when creating the token.
// msiEndpoint - empty string, or pass a non-empty string to override the default value.
// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil {
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...)
}
func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
return nil, err
}
return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...)
}
// ManagedIdentityOptions contains optional values for configuring managed identity authentication.
type ManagedIdentityOptions struct {
// ClientID is the user-assigned identity to use during authentication.
// It is mutually exclusive with IdentityResourceID.
ClientID string
// IdentityResourceID is the resource ID of the user-assigned identity to use during authentication.
// It is mutually exclusive with ClientID.
IdentityResourceID string
}
// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity.
// It supports the following managed identity environments.
// - App Service Environment (API version 2017-09-01 only)
// - Cloud shell
// - IMDS with a system or user assigned identity
func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if options == nil {
options = &ManagedIdentityOptions{}
}
return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...)
}
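For orientation, the following is a minimal, editor-added usage sketch of the managed-identity constructor shown in the hunk above; it is not part of the vendored diff. The resource URL and the user-assigned client ID are illustrative placeholders.

```
// Sketch only: acquire a token through the managed identity support above.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Pass nil options for a system-assigned identity; for a user-assigned
	// identity set either ClientID or IdentityResourceID (never both).
	opts := &adal.ManagedIdentityOptions{ClientID: "00000000-0000-0000-0000-000000000000"} // placeholder
	spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", opts)
	if err != nil {
		fmt.Println("token setup failed:", err)
		return
	}
	if err := spt.EnsureFreshWithContext(context.Background()); err != nil {
		fmt.Println("token refresh failed:", err)
		return
	}
	fmt.Println("token acquired:", spt.OAuthToken() != "")
}
```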
func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if userAssignedID != "" && identityResourceID != "" {
return nil, errors.New("cannot specify userAssignedID and identityResourceID")
if userAssignedID != nil {
if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
return nil, err
}
}
msiType, endpoint, err := getMSIType()
if err != nil {
logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v", err)
return nil, err
if identityResourceID != nil {
if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil {
return nil, err
}
}
logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s", msiType, endpoint)
if msiEndpoint != "" {
endpoint = msiEndpoint
logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s", endpoint)
}
msiEndpointURL, err := url.Parse(endpoint)
// We set the oauth config token endpoint to be MSI's endpoint
msiEndpointURL, err := url.Parse(msiEndpoint)
if err != nil {
return nil, err
}
// cloud shell sends its data in the request body
if msiType != msiTypeCloudShell {
v := url.Values{}
v.Set("resource", resource)
clientIDParam := "client_id"
switch msiType {
case msiTypeAppServiceV20170901:
clientIDParam = "clientid"
v.Set("api-version", appServiceAPIVersion2017)
break
case msiTypeIMDS:
v.Set("api-version", msiAPIVersion)
}
if userAssignedID != "" {
v.Set(clientIDParam, userAssignedID)
} else if identityResourceID != "" {
v.Set("mi_res_id", identityResourceID)
}
msiEndpointURL.RawQuery = v.Encode()
v := url.Values{}
v.Set("resource", resource)
// App Service MSI currently only supports token API version 2017-09-01
if isAppService() {
v.Set("api-version", appServiceAPIVersion)
} else {
v.Set("api-version", msiAPIVersion)
}
if userAssignedID != nil {
v.Set("client_id", *userAssignedID)
}
if identityResourceID != nil {
v.Set("mi_res_id", *identityResourceID)
}
msiEndpointURL.RawQuery = v.Encode()
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
@@ -839,14 +747,10 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, iden
OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL,
},
Secret: &ServicePrincipalMSISecret{
msiType: msiType,
clientResourceID: identityResourceID,
},
Secret: &ServicePrincipalMSISecret{},
Resource: resource,
AutoRefresh: true,
RefreshWithin: defaultRefresh,
ClientID: userAssignedID,
},
refreshLock: &sync.RWMutex{},
sender: sender(),
@@ -854,6 +758,10 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, iden
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
}
if userAssignedID != nil {
spt.inner.ClientID = *userAssignedID
}
return spt, nil
}
@@ -950,6 +858,31 @@ func (spt *ServicePrincipalToken) getGrantType() string {
}
}
func isIMDS(u url.URL) bool {
return isMSIEndpoint(u) == true || isASEEndpoint(u) == true
}
func isMSIEndpoint(endpoint url.URL) bool {
msi, err := url.Parse(msiEndpoint)
if err != nil {
return false
}
return endpoint.Host == msi.Host && endpoint.Path == msi.Path
}
func isASEEndpoint(endpoint url.URL) bool {
aseEndpoint, err := GetMSIAppServiceEndpoint()
if err != nil {
// app service environment isn't enabled
return false
}
ase, err := url.Parse(aseEndpoint)
if err != nil {
return false
}
return endpoint.Host == ase.Host && endpoint.Path == ase.Path
}
func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
if spt.customRefreshFunc != nil {
token, err := spt.customRefreshFunc(ctx, resource)
@@ -959,45 +892,19 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
spt.inner.Token = *token
return spt.InvokeRefreshCallbacks(spt.inner.Token)
}
req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
req.Header.Add("User-Agent", UserAgent())
req = req.WithContext(ctx)
var resp *http.Response
authBodyFilter := func(b []byte) []byte {
if logger.Level() != logger.LogAuth {
return []byte("**REDACTED** authentication body")
}
return b
// Add header when runtime is on App Service or Functions
if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
req.Header.Add("Secret", asMSISecret)
}
if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
switch msiSecret.msiType {
case msiTypeAppServiceV20170901:
req.Method = http.MethodGet
req.Header.Set("secret", os.Getenv(msiSecretEnv))
break
case msiTypeCloudShell:
req.Header.Set("Metadata", "true")
data := url.Values{}
data.Set("resource", spt.inner.Resource)
if spt.inner.ClientID != "" {
data.Set("client_id", spt.inner.ClientID)
} else if msiSecret.clientResourceID != "" {
data.Set("msi_res_id", msiSecret.clientResourceID)
}
req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
break
case msiTypeIMDS:
req.Method = http.MethodGet
req.Header.Set("Metadata", "true")
break
}
logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{}
v.Set("client_id", spt.inner.ClientID)
v.Set("resource", resource)
@@ -1026,26 +933,40 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
req.Body = body
logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
}
if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
req.Method = http.MethodGet
req.Header.Set(metadataHeader, "true")
}
var resp *http.Response
if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = getMSIEndpoint(ctx, spt.sender)
if err != nil {
// return a TokenRefreshError here so that we don't keep retrying
return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. Failed HTTP request to MSI endpoint: %v", err), nil)
}
resp.Body.Close()
}
if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
resp, err = spt.sender.Do(req)
}
// don't return a TokenRefreshError here; this will allow retry logic to apply
if err != nil {
// don't return a TokenRefreshError here; this will allow retry logic to apply
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
} else if resp == nil {
return fmt.Errorf("adal: received nil response and error")
}
logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
defer resp.Body.Close()
rb, err := ioutil.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
if err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp)
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp)
}
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp)
return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp)
}
// for the following error cases don't return a TokenRefreshError. the operation succeeded
@@ -1058,60 +979,15 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if len(strings.Trim(string(rb), " ")) == 0 {
return fmt.Errorf("adal: Empty service principal token received during refresh")
}
token := struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
// AAD returns expires_in as a string, ADFS returns it as an int
ExpiresIn json.Number `json:"expires_in"`
// expires_on can be in two formats, a UTC time stamp or the number of seconds.
ExpiresOn string `json:"expires_on"`
NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}{}
// return a TokenRefreshError in the follow error cases as the token is in an unexpected format
var token Token
err = json.Unmarshal(rb, &token)
if err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp)
return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb))
}
expiresOn := json.Number("")
// ADFS doesn't include the expires_on field
if token.ExpiresOn != "" {
if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp)
}
}
spt.inner.Token.AccessToken = token.AccessToken
spt.inner.Token.RefreshToken = token.RefreshToken
spt.inner.Token.ExpiresIn = token.ExpiresIn
spt.inner.Token.ExpiresOn = expiresOn
spt.inner.Token.NotBefore = token.NotBefore
spt.inner.Token.Resource = token.Resource
spt.inner.Token.Type = token.Type
return spt.InvokeRefreshCallbacks(spt.inner.Token)
}
spt.inner.Token = token
// converts expires_on to the number of seconds
func parseExpiresOn(s string) (json.Number, error) {
// convert the expiration date to the number of seconds from now
timeToDuration := func(t time.Time) json.Number {
dur := t.Sub(time.Now().UTC())
return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10))
}
if _, err := strconv.ParseInt(s, 10, 64); err == nil {
// this is the number of seconds case, no conversion required
return json.Number(s), nil
} else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil {
return timeToDuration(eo), nil
} else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil {
return timeToDuration(eo), nil
} else {
// unknown format
return json.Number(""), err
}
return spt.InvokeRefreshCallbacks(token)
}
// retry logic specific to retrieving a token from the IMDS endpoint
@@ -1242,6 +1118,46 @@ func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
return tokens
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %v", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
}
}
return nil
}
// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %v", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
}
}
return nil
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh primary token: %v", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %v", err)
}
}
return nil
}
// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) {
if err := validateStringParam(clientID, "clientID"); err != nil {
@@ -1272,55 +1188,6 @@ func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig,
return &m, nil
}
// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource.
func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) {
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if certificate == nil {
return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
}
if privateKey == nil {
return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
}
auxTenants := multiTenantCfg.AuxiliaryTenants()
m := MultiTenantServicePrincipalToken{
AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)),
}
primary, err := NewServicePrincipalTokenWithSecret(
*multiTenantCfg.PrimaryTenant(),
clientID,
resource,
&ServicePrincipalCertificateSecret{
PrivateKey: privateKey,
Certificate: certificate,
},
)
if err != nil {
return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err)
}
m.PrimaryToken = primary
for i := range auxTenants {
aux, err := NewServicePrincipalTokenWithSecret(
*auxTenants[i],
clientID,
resource,
&ServicePrincipalCertificateSecret{
PrivateKey: privateKey,
Certificate: certificate,
},
)
if err != nil {
return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err)
}
m.AuxiliaryTokens[i] = aux
}
return &m, nil
}
// MSIAvailable returns true if the MSI endpoint is available for authentication.
func MSIAvailable(ctx context.Context, sender Sender) bool {
resp, err := getMSIEndpoint(ctx, sender)
@@ -1329,8 +1196,3 @@ func MSIAvailable(ctx context.Context, sender Sender) bool {
}
return err == nil
}
// used for testing purposes
var msiAvailableHook = func(ctx context.Context, sender Sender) bool {
return MSIAvailable(ctx, sender)
}
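As a hedged aside (again not part of the diff), the MSIAvailable helper above can serve as a cheap pre-flight check before committing to managed identity authentication; the probe it wraps uses a 500 ms timeout internally, as the getMSIEndpoint helpers further down show. Using http.DefaultClient as the Sender here is purely an illustrative choice.

```
// Sketch only: probe for a managed identity endpoint before relying on it.
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// Any value with a Do(*http.Request) (*http.Response, error) method satisfies adal.Sender.
	if adal.MSIAvailable(ctx, http.DefaultClient) {
		fmt.Println("managed identity endpoint reachable")
	} else {
		fmt.Println("managed identity not available; fall back to another credential")
	}
}
```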


@@ -18,12 +18,13 @@ package adal
import (
"context"
"fmt"
"net/http"
"time"
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
// http.NewRequestWithContext() was added in Go 1.13
@@ -33,43 +34,3 @@ func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error)
req.URL.RawQuery = q.Encode()
return sender.Do(req)
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %w", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.EnsureFreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
}
}
return nil
}
// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh primary token: %w", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshWithContext(ctx); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
}
}
return nil
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh primary token: %w", err)
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
return fmt.Errorf("failed to refresh auxiliary token: %w", err)
}
}
return nil
}
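The multi-tenant refresh helpers in this file pair with the NewMultiTenantServicePrincipalToken constructor shown earlier. A hedged sketch of how they are typically combined follows (editor-added, not part of the diff); the helper name, credentials, and resource are placeholders, and the MultiTenantOAuthConfig is assumed to have been built elsewhere, for example with adal.NewMultiTenantOAuthConfig.

```
// Sketch only: create a multi-tenant token and keep it fresh.
package tokensketch

import (
	"context"
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func refreshAllTenants(ctx context.Context, cfg adal.MultiTenantOAuthConfig) error {
	mt, err := adal.NewMultiTenantServicePrincipalToken(cfg, "client-id", "client-secret", "https://management.azure.com/")
	if err != nil {
		return fmt.Errorf("creating multi-tenant token: %v", err)
	}
	// Refreshes the primary token and every auxiliary token, as implemented above.
	if err := mt.EnsureFreshWithContext(ctx); err != nil {
		return err
	}
	// The auxiliary bearer tokens can then be attached to outgoing requests.
	fmt.Println("auxiliary tokens:", len(mt.AuxiliaryOAuthTokens()))
	return nil
}
```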


@@ -23,6 +23,8 @@ import (
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil)
@@ -32,43 +34,3 @@ func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error)
req.URL.RawQuery = q.Encode()
return sender.Do(req)
}
// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
return err
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.EnsureFreshWithContext(ctx); err != nil {
return err
}
}
return nil
}
// RefreshWithContext obtains a fresh token for the Service Principal.
func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil {
return err
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshWithContext(ctx); err != nil {
return err
}
}
return nil
}
// RefreshExchangeWithContext refreshes the token, but for a different resource.
func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil {
return err
}
for _, aux := range mt.AuxiliaryTokens {
if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil {
return err
}
}
return nil
}


@@ -42,52 +42,6 @@ const (
var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK}
// FutureAPI contains the set of methods on the Future type.
type FutureAPI interface {
// Response returns the last HTTP response.
Response() *http.Response
// Status returns the last status message of the operation.
Status() string
// PollingMethod returns the method used to monitor the status of the asynchronous operation.
PollingMethod() PollingMethodType
// DoneWithContext queries the service to see if the operation has completed.
DoneWithContext(context.Context, autorest.Sender) (bool, error)
// GetPollingDelay returns a duration the application should wait before checking
// the status of the asynchronous request and true; this value is returned from
// the service via the Retry-After response header. If the header wasn't returned
// then the function returns the zero-value time.Duration and false.
GetPollingDelay() (time.Duration, bool)
// WaitForCompletionRef will return when one of the following conditions is met: the long
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
// If no deadline is specified in the context then the client.PollingDuration will be
// used to determine if a default deadline should be used.
// If PollingDuration is greater than zero the value will be used as the context's timeout.
// If PollingDuration is zero then no default deadline will be used.
WaitForCompletionRef(context.Context, autorest.Client) error
// MarshalJSON implements the json.Marshaler interface.
MarshalJSON() ([]byte, error)
// UnmarshalJSON implements the json.Unmarshaler interface.
UnmarshalJSON([]byte) error
// PollingURL returns the URL used for retrieving the status of the long-running operation.
PollingURL() string
// GetResult should be called once polling has completed successfully.
// It makes the final GET call to retrieve the resultant payload.
GetResult(autorest.Sender) (*http.Response, error)
}
var _ FutureAPI = (*Future)(nil)
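The FutureAPI surface above is what callers interact with when polling long-running operations. As an editor-added illustration (the helper name is invented), a typical wait-and-fetch step might look like this; the autorest.Client value doubles as the Sender passed to GetResult.

```
// Sketch only: wait for a long-running operation and fetch its final payload.
package lrosketch

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func waitForLRO(ctx context.Context, client autorest.Client, f *azure.Future) (*http.Response, error) {
	// Blocks until the operation completes, the context is cancelled, or the
	// client's polling duration is exceeded, retrying per the client settings.
	if err := f.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	// Final GET to retrieve the resultant payload.
	return f.GetResult(client)
}
```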
// Future provides a mechanism to access the status and results of an asynchronous request.
// Since futures are stateful they should be passed by value to avoid race conditions.
type Future struct {


@@ -37,9 +37,6 @@ const (
// should be included in the response.
HeaderReturnClientID = "x-ms-return-client-request-id"
// HeaderContentType is the type of the content in the HTTP response.
HeaderContentType = "Content-Type"
// HeaderRequestID is the Azure extension header of the service generated request ID returned
// in the response.
HeaderRequestID = "x-ms-request-id"
@@ -92,85 +89,54 @@ func (se ServiceError) Error() string {
// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
func (se *ServiceError) UnmarshalJSON(b []byte) error {
// per the OData v4 spec the details field must be an array of JSON objects.
// unfortunately not all services adhere to the spec and just return a single
// object instead of an array with one object. so we have to perform some
// shenanigans to accommodate both cases.
// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
type serviceErrorInternal struct {
type serviceError1 struct {
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target,omitempty"`
AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
// not all services conform to the OData v4 spec.
// the following fields are where we've seen discrepancies
// spec calls for []map[string]interface{} but have seen map[string]interface{}
Details interface{} `json:"details,omitempty"`
// spec calls for map[string]interface{} but have seen []map[string]interface{} and string
InnerError interface{} `json:"innererror,omitempty"`
Target *string `json:"target"`
Details []map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}
sei := serviceErrorInternal{}
if err := json.Unmarshal(b, &sei); err != nil {
return err
type serviceError2 struct {
Code string `json:"code"`
Message string `json:"message"`
Target *string `json:"target"`
Details map[string]interface{} `json:"details"`
InnerError map[string]interface{} `json:"innererror"`
AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
}
// copy the fields we know to be correct
se.AdditionalInfo = sei.AdditionalInfo
se.Code = sei.Code
se.Message = sei.Message
se.Target = sei.Target
// converts an []interface{} to []map[string]interface{}
arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
arrayOf, ok := v.([]interface{})
if !ok {
return nil, false
}
final := []map[string]interface{}{}
for _, item := range arrayOf {
as, ok := item.(map[string]interface{})
if !ok {
return nil, false
}
final = append(final, as)
}
return final, true
se1 := serviceError1{}
err := json.Unmarshal(b, &se1)
if err == nil {
se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo)
return nil
}
// convert the remaining fields, falling back to raw JSON if necessary
if c, ok := arrayOfObjs(sei.Details); ok {
se.Details = c
} else if c, ok := sei.Details.(map[string]interface{}); ok {
se.Details = []map[string]interface{}{c}
} else if sei.Details != nil {
// stuff into Details
se.Details = []map[string]interface{}{
{"raw": sei.Details},
}
se2 := serviceError2{}
err = json.Unmarshal(b, &se2)
if err == nil {
se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo)
se.Details = append(se.Details, se2.Details)
return nil
}
return err
}
if c, ok := sei.InnerError.(map[string]interface{}); ok {
se.InnerError = c
} else if c, ok := arrayOfObjs(sei.InnerError); ok {
// if there's only one error extract it
if len(c) == 1 {
se.InnerError = c[0]
} else {
// multiple errors, stuff them into the value
se.InnerError = map[string]interface{}{
"multi": c,
}
}
} else if c, ok := sei.InnerError.(string); ok {
se.InnerError = map[string]interface{}{"error": c}
} else if sei.InnerError != nil {
// stuff into InnerError
se.InnerError = map[string]interface{}{
"raw": sei.InnerError,
}
}
return nil
func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) {
se.Code = code
se.Message = message
se.Target = target
se.Details = details
se.InnerError = inner
se.AdditionalInfo = additional
}
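To make the tolerance for non-compliant payloads concrete, here is an editor-added sketch (the JSON body is invented) that feeds a single-object "details" field, the exact case both unmarshalling variants above go out of their way to accept.

```
// Sketch only: unmarshal a non-OData-v4 error body into azure.ServiceError.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// "details" is a single object here rather than the array the spec calls for.
	body := []byte(`{"code":"Conflict","message":"resource already exists","details":{"reason":"duplicate"}}`)

	var se azure.ServiceError
	if err := json.Unmarshal(body, &se); err != nil {
		fmt.Println("unexpected parse failure:", err)
		return
	}
	// Either implementation normalizes Details into a one-element slice of maps.
	fmt.Println(se.Code, se.Message, len(se.Details))
}
```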
// RequestError describes an error response returned by Azure service.
@@ -341,30 +307,16 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
// Check if error is unwrapped ServiceError
decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
if err := decoder.Decode(&e.ServiceError); err != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err)
}
// for example, should the API return the literal value `null` as the response
if e.ServiceError == nil {
e.ServiceError = &ServiceError{
Code: "Unknown",
Message: "Unknown service error",
Details: []map[string]interface{}{
{
"HttpResponse.Body": b.String(),
},
},
}
return err
}
}
if e.ServiceError != nil && e.ServiceError.Message == "" {
if e.ServiceError.Message == "" {
// if we're here it means the returned error wasn't OData v4 compliant.
// try to unmarshal the body in hopes of getting something.
rawBody := map[string]interface{}{}
decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes()))
if err := decoder.Decode(&rawBody); err != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err)
return err
}
e.ServiceError = &ServiceError{


@@ -17,7 +17,6 @@ package autorest
import (
"bytes"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
@@ -166,8 +165,7 @@ type Client struct {
// Setting this to zero will use the provided context to control the duration.
PollingDuration time.Duration
// RetryAttempts sets the total number of times the client will attempt to make an HTTP request.
// Set the value to 1 to disable retries. DO NOT set the value to less than 1.
// RetryAttempts sets the default number of retry attempts for client.
RetryAttempts int
// RetryDuration sets the delay duration for retries.
@@ -261,9 +259,6 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
},
})
resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
if resp == nil && err == nil {
err = errors.New("autorest: received nil response and error")
}
logger.Instance.WriteResponse(resp, logger.Filter{})
Respond(resp, c.ByInspecting())
return resp, err


@@ -0,0 +1,5 @@
module github.com/Azure/go-autorest/autorest/date
go 1.12
require github.com/Azure/go-autorest v14.2.0+incompatible


@@ -0,0 +1,2 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=

Some files were not shown because too many files have changed in this diff.