mirror of
https://github.com/kubernetes-sigs/descheduler.git
synced 2026-01-26 05:14:13 +01:00
Compare commits
13 Commits
deschedule
...
release-1.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1c93babc6d | ||
|
|
d0d2af4eac | ||
|
|
d51fb9d6c3 | ||
|
|
6dd4bf83f0 | ||
|
|
fd6c9a68a1 | ||
|
|
bbf8878b96 | ||
|
|
9ed7f93abd | ||
|
|
17128794ed | ||
|
|
09dcf88149 | ||
|
|
5e63610474 | ||
|
|
6cdfab1aa9 | ||
|
|
2e343a5202 | ||
|
|
547dbcec65 |
@@ -11,7 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
FROM golang:1.18.2
|
||||
FROM golang:1.17.7
|
||||
|
||||
WORKDIR /go/src/sigs.k8s.io/descheduler
|
||||
COPY . .
|
||||
|
||||
4
Makefile
4
Makefile
@@ -24,7 +24,7 @@ ARCHS = amd64 arm arm64
|
||||
|
||||
LDFLAGS=-ldflags "-X ${LDFLAG_LOCATION}.version=${VERSION} -X ${LDFLAG_LOCATION}.buildDate=${BUILD} -X ${LDFLAG_LOCATION}.gitbranch=${BRANCH} -X ${LDFLAG_LOCATION}.gitsha1=${SHA1}"
|
||||
|
||||
GOLANGCI_VERSION := v1.46.1
|
||||
GOLANGCI_VERSION := v1.43.0
|
||||
HAS_GOLANGCI := $(shell ls _output/bin/golangci-lint 2> /dev/null)
|
||||
|
||||
# REGISTRY is the container registry to push
|
||||
@@ -144,4 +144,4 @@ test-helm: ensure-helm-install
|
||||
ensure-helm-install:
|
||||
ifndef HAS_HELM
|
||||
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 ./get_helm.sh && ./get_helm.sh
|
||||
endif
|
||||
endif
|
||||
2
OWNERS
2
OWNERS
@@ -10,8 +10,6 @@ reviewers:
|
||||
- seanmalloy
|
||||
- ingvagabund
|
||||
- lixiang233
|
||||
- a7i
|
||||
- janeliul
|
||||
emeritus_approvers:
|
||||
- aveshagarwal
|
||||
- k82cn
|
||||
|
||||
111
README.md
111
README.md
@@ -50,8 +50,6 @@ Table of Contents
|
||||
- [Node Fit filtering](#node-fit-filtering)
|
||||
- [Pod Evictions](#pod-evictions)
|
||||
- [Pod Disruption Budget (PDB)](#pod-disruption-budget-pdb)
|
||||
- [High Availability](#high-availability)
|
||||
- [Configure HA Mode](#configure-ha-mode)
|
||||
- [Metrics](#metrics)
|
||||
- [Compatibility Matrix](#compatibility-matrix)
|
||||
- [Getting Involved and Contributing](#getting-involved-and-contributing)
|
||||
@@ -105,17 +103,17 @@ See the [resources | Kustomize](https://kubectl.docs.kubernetes.io/references/ku
|
||||
|
||||
Run As A Job
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.24.1' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/job?ref=v0.22.0' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A CronJob
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.24.1' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/cronjob?ref=v0.22.0' | kubectl apply -f -
|
||||
```
|
||||
|
||||
Run As A Deployment
|
||||
```
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.24.1' | kubectl apply -f -
|
||||
kustomize build 'github.com/kubernetes-sigs/descheduler/kubernetes/deployment?ref=v0.22.0' | kubectl apply -f -
|
||||
```
|
||||
|
||||
## User Guide
|
||||
@@ -134,7 +132,6 @@ The policy includes a common configuration that applies to all the strategies:
|
||||
| `evictSystemCriticalPods` | `false` | [Warning: Will evict Kubernetes system pods] allows eviction of pods with any priority, including system pods like kube-dns |
|
||||
| `ignorePvcPods` | `false` | set whether PVC pods should be evicted or ignored |
|
||||
| `maxNoOfPodsToEvictPerNode` | `nil` | maximum number of pods evicted from each node (summed through all strategies) |
|
||||
| `maxNoOfPodsToEvictPerNamespace` | `nil` | maximum number of pods evicted from each namespace (summed through all strategies) |
|
||||
| `evictFailedBarePods` | `false` | allow eviction of pods without owner references and in failed phase |
|
||||
|
||||
As part of the policy, the parameters associated with each strategy can be configured.
|
||||
@@ -221,17 +218,6 @@ These thresholds, `thresholds` and `targetThresholds`, could be tuned as per you
|
||||
strategy evicts pods from `overutilized nodes` (those with usage above `targetThresholds`) to `underutilized nodes`
|
||||
(those with usage below `thresholds`), it will abort if any number of `underutilized nodes` or `overutilized nodes` is zero.
|
||||
|
||||
Additionally, the strategy accepts a `useDeviationThresholds` parameter.
|
||||
If that parameter is set to `true`, the thresholds are considered as percentage deviations from mean resource usage.
|
||||
`thresholds` will be deducted from the mean among all nodes and `targetThresholds` will be added to the mean.
|
||||
A resource consumption above (resp. below) this window is considered as overutilization (resp. underutilization).
|
||||
|
||||
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
|
||||
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
|
||||
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
|
||||
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
|
||||
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
@@ -239,7 +225,6 @@ actual usage metrics. Implementing metrics-based descheduling is currently TODO
|
||||
|`thresholds`|map(string:int)|
|
||||
|`targetThresholds`|map(string:int)|
|
||||
|`numberOfNodes`|int|
|
||||
|`useDeviationThresholds`|bool|
|
||||
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||
|`nodeFit`|bool (see [node fit filtering](#node-fit-filtering))|
|
||||
@@ -280,10 +265,10 @@ under utilized frequently or for a short period of time. By default, `numberOfNo
|
||||
|
||||
### HighNodeUtilization
|
||||
|
||||
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
|
||||
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
|
||||
This strategy finds nodes that are under utilized and evicts pods from the nodes in the hope that these pods will be
|
||||
scheduled compactly into fewer nodes. Used in conjunction with node auto-scaling, this strategy is intended to help
|
||||
trigger down scaling of under utilized nodes.
|
||||
This strategy **must** be used with the scheduler scoring strategy `MostAllocated`. The parameters of this strategy are
|
||||
This strategy **must** be used with the scheduler strategy `MostRequestedPriority`. The parameters of this strategy are
|
||||
configured under `nodeResourceUtilizationThresholds`.
|
||||
|
||||
The under utilization of nodes is determined by a configurable threshold `thresholds`. The threshold
|
||||
@@ -300,12 +285,6 @@ strategy evicts pods from `underutilized nodes` (those with usage below `thresho
|
||||
so that they can be recreated in appropriately utilized nodes.
|
||||
The strategy will abort if any number of `underutilized nodes` or `appropriately utilized nodes` is zero.
|
||||
|
||||
**NOTE:** Node resource consumption is determined by the requests and limits of pods, not actual usage.
|
||||
This approach is chosen in order to maintain consistency with the kube-scheduler, which follows the same
|
||||
design for scheduling pods onto nodes. This means that resource usage as reported by Kubelet (or commands
|
||||
like `kubectl top`) may differ from the calculated consumption, due to these components reporting
|
||||
actual usage metrics. Implementing metrics-based descheduling is currently TODO for the project.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
@@ -420,17 +399,10 @@ pod "podA" with a toleration to tolerate a taint ``key=value:NoSchedule`` schedu
|
||||
node. If the node's taint is subsequently updated/removed, taint is no longer satisfied by its pods' tolerations
|
||||
and will be evicted.
|
||||
|
||||
Node taints can be excluded from consideration by specifying a list of excludedTaints. If a node taint key **or**
|
||||
key=value matches an excludedTaints entry, the taint will be ignored.
|
||||
|
||||
For example, excludedTaints entry "dedicated" would match all taints with key "dedicated", regardless of value.
|
||||
excludedTaints entry "dedicated=special-user" would match taints with key "dedicated" and value "special-user".
|
||||
|
||||
**Parameters:**
|
||||
|
||||
|Name|Type|
|
||||
|---|---|
|
||||
|`excludedTaints`|list(string)|
|
||||
|`thresholdPriority`|int (see [priority filtering](#priority-filtering))|
|
||||
|`thresholdPriorityClassName`|string (see [priority filtering](#priority-filtering))|
|
||||
|`namespaces`|(see [namespace filtering](#namespace-filtering))|
|
||||
@@ -445,10 +417,6 @@ kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"RemovePodsViolatingNodeTaints":
|
||||
enabled: true
|
||||
params:
|
||||
excludedTaints:
|
||||
- dedicated=special-user # exclude taints with key "dedicated" and value "special-user"
|
||||
- reserved # exclude all taints with key "reserved"
|
||||
````
|
||||
|
||||
### RemovePodsViolatingTopologySpreadConstraint
|
||||
@@ -689,7 +657,7 @@ does not exist, descheduler won't create it and will throw an error.
|
||||
|
||||
### Label filtering
|
||||
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)
|
||||
The following strategies can configure a [standard kubernetes labelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta)
|
||||
to filter pods by their labels:
|
||||
|
||||
* `PodLifeTime`
|
||||
@@ -737,9 +705,8 @@ The following strategies accept a `nodeFit` boolean parameter which can optimize
|
||||
|
||||
If set to `true` the descheduler will consider whether or not the pods that meet eviction criteria will fit on other nodes before evicting them. If a pod cannot be rescheduled to another node, it will not be evicted. Currently the following criteria are considered when setting `nodeFit` to `true`:
|
||||
- A `nodeSelector` on the pod
|
||||
- Any `tolerations` on the pod and any `taints` on the other nodes
|
||||
- Any `Tolerations` on the pod and any `Taints` on the other nodes
|
||||
- `nodeAffinity` on the pod
|
||||
- Resource `requests` made by the pod and the resources available on other nodes
|
||||
- Whether any of the other nodes are marked as `unschedulable`
|
||||
|
||||
E.g.
|
||||
@@ -749,17 +716,17 @@ apiVersion: "descheduler/v1alpha1"
|
||||
kind: "DeschedulerPolicy"
|
||||
strategies:
|
||||
"LowNodeUtilization":
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"cpu": 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu": 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
enabled: true
|
||||
params:
|
||||
nodeResourceUtilizationThresholds:
|
||||
thresholds:
|
||||
"cpu" : 20
|
||||
"memory": 20
|
||||
"pods": 20
|
||||
targetThresholds:
|
||||
"cpu" : 50
|
||||
"memory": 50
|
||||
"pods": 50
|
||||
nodeFit: true
|
||||
```
|
||||
|
||||
@@ -794,23 +761,6 @@ Setting `--v=4` or greater on the Descheduler will log all reasons why any pod i
|
||||
Pods subject to a Pod Disruption Budget(PDB) are not evicted if descheduling violates its PDB. The pods
|
||||
are evicted by using the eviction subresource to handle PDB.
|
||||
|
||||
## High Availability
|
||||
|
||||
In High Availability mode, Descheduler starts [leader election](https://github.com/kubernetes/client-go/tree/master/tools/leaderelection) process in Kubernetes. You can activate HA mode
|
||||
if you choose to deploy your application as Deployment.
|
||||
|
||||
Deployment starts with 1 replica by default. If you want to use more than 1 replica, you must consider
|
||||
enable High Availability mode since we don't want to run descheduler pods simultaneously.
|
||||
|
||||
### Configure HA Mode
|
||||
|
||||
The leader election process can be enabled by setting `--leader-elect` in the CLI. You can also set
|
||||
`--set=leaderElection.enabled=true` flag if you are using Helm.
|
||||
|
||||
To get best results from HA mode some additional configurations might require:
|
||||
* Configure a [podAntiAffinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node) rule if you want to schedule onto a node only if that node is in the same zone as at least one already-running descheduler
|
||||
* Set the replica count greater than 1
|
||||
|
||||
## Metrics
|
||||
|
||||
| name | type | description |
|
||||
@@ -830,18 +780,17 @@ v0.18 should work with k8s v1.18, v1.17, and v1.16.
|
||||
Starting with descheduler release v0.18 the minor version of descheduler matches the minor version of the k8s client
|
||||
packages that it is compiled with.
|
||||
|
||||
| Descheduler | Supported Kubernetes Version |
|
||||
|-------------|------------------------------|
|
||||
| v0.24 | v1.24 |
|
||||
| v0.23 | v1.23 |
|
||||
| v0.22 | v1.22 |
|
||||
| v0.21 | v1.21 |
|
||||
| v0.20 | v1.20 |
|
||||
| v0.19 | v1.19 |
|
||||
| v0.18 | v1.18 |
|
||||
| v0.10 | v1.17 |
|
||||
| v0.4-v0.9 | v1.9+ |
|
||||
| v0.1-v0.3 | v1.7-v1.8 |
|
||||
Descheduler | Supported Kubernetes Version
|
||||
-------------|-----------------------------
|
||||
v0.22 | v1.22
|
||||
v0.21 | v1.21
|
||||
v0.20 | v1.20
|
||||
v0.19 | v1.19
|
||||
v0.18 | v1.18
|
||||
v0.10 | v1.17
|
||||
v0.4-v0.9 | v1.9+
|
||||
v0.1-v0.3 | v1.7-v1.8
|
||||
|
||||
|
||||
## Getting Involved and Contributing
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: v1
|
||||
name: descheduler
|
||||
version: 0.24.1
|
||||
appVersion: 0.24.1
|
||||
version: 0.23.2
|
||||
appVersion: 0.23.1
|
||||
description: Descheduler for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
|
||||
keywords:
|
||||
- kubernetes
|
||||
|
||||
@@ -43,46 +43,30 @@ The command removes all the Kubernetes components associated with the chart and
|
||||
|
||||
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------|-----------------------------------------------------------------------------------------------------------------------|--------------------------------------|
|
||||
| `kind` | Use as CronJob or Deployment | `CronJob` |
|
||||
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
|
||||
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
|
||||
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
|
||||
| `imagePullSecrets` | Docker repository secrets | `[]` |
|
||||
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
|
||||
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
|
||||
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
|
||||
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
|
||||
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
|
||||
| `replicas` | The replica count for Deployment | `1` |
|
||||
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
|
||||
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
|
||||
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
|
||||
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
|
||||
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
|
||||
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
|
||||
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
|
||||
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
| `service.enabled` | If `true`, create a service for deployment | `false` |
|
||||
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
|
||||
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
|
||||
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
|
||||
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
|
||||
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
|
||||
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
|
||||
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
|
||||
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
|
||||
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
|
||||
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
|
||||
| `commonLabels` | Labels to apply to all resources | `{}` |
|
||||
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
|
||||
| Parameter | Description | Default |
|
||||
| ------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
|
||||
| `kind` | Use as CronJob or Deployment | `CronJob` |
|
||||
| `image.repository` | Docker repository to use | `k8s.gcr.io/descheduler/descheduler` |
|
||||
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
|
||||
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
|
||||
| `imagePullSecrets` | Docker repository secrets | `[]` |
|
||||
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
|
||||
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
|
||||
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
|
||||
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
|
||||
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
|
||||
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `nil` |
|
||||
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
|
||||
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
|
||||
| `deschedulerPolicy.strategies` | The _descheduler_ strategies to apply | _see values.yaml_ |
|
||||
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
|
||||
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
|
||||
| `podSecurityPolicy.create` | If `true`, create PodSecurityPolicy | `true` |
|
||||
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
|
||||
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
|
||||
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
|
||||
| `nodeSelector` | Node selectors to run the descheduler cronjob on specific nodes | `nil` |
|
||||
| `tolerations` | tolerations to run the descheduler cronjob on specific nodes | `nil` |
|
||||
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
|
||||
| `commonLabels` | Labels to apply to all resources | `{}` |
|
||||
|
||||
@@ -1,7 +1 @@
|
||||
Descheduler installed as a {{ .Values.kind }}.
|
||||
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
{{- if eq .Values.replicas 1.0}}
|
||||
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
Descheduler installed as a {{ .Values.kind }} .
|
||||
|
||||
@@ -65,30 +65,3 @@ Create the name of the service account to use
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Leader Election
|
||||
*/}}
|
||||
{{- define "descheduler.leaderElection"}}
|
||||
{{- if .Values.leaderElection -}}
|
||||
- --leader-elect={{ .Values.leaderElection.enabled }}
|
||||
{{- if .Values.leaderElection.leaseDuration }}
|
||||
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.renewDeadline }}
|
||||
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.retryPeriod }}
|
||||
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.resourceLock }}
|
||||
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.resourceName }}
|
||||
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
|
||||
{{- end }}
|
||||
{{- if .Values.leaderElection.resourceNamescape }}
|
||||
- --leader-elect-resource-namespace={{ .Values.leaderElection.resourceNamescape }}
|
||||
{{- end -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -24,15 +24,6 @@ rules:
|
||||
- apiGroups: ["scheduling.k8s.io"]
|
||||
resources: ["priorityclasses"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
{{- if .Values.leaderElection.enabled }}
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["create", "update"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
resourceNames: ["descheduler"]
|
||||
verbs: ["get", "patch", "delete"]
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityPolicy.create }}
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
|
||||
@@ -2,7 +2,6 @@ apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
data:
|
||||
|
||||
@@ -3,7 +3,6 @@ apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
spec:
|
||||
|
||||
@@ -3,18 +3,10 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if gt .Values.replicas 1.0}}
|
||||
{{- if not .Values.leaderElection.enabled }}
|
||||
{{- fail "You must set leaderElection to use more than 1 replica"}}
|
||||
{{- end}}
|
||||
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
|
||||
{{- else }}
|
||||
replicas: 1
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||
@@ -35,10 +27,6 @@ spec:
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
|
||||
@@ -56,7 +44,6 @@ spec:
|
||||
- {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- include "descheduler.leaderElection" . | nindent 12 }}
|
||||
ports:
|
||||
- containerPort: 10258
|
||||
protocol: TCP
|
||||
|
||||
@@ -3,7 +3,6 @@ apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
{{- if eq .Values.service.enabled true }}
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
name: {{ template "descheduler.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: http-metrics
|
||||
port: 10258
|
||||
protocol: TCP
|
||||
targetPort: 10258
|
||||
selector:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 4 }}
|
||||
type: ClusterIP
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -3,7 +3,6 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ template "descheduler.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
{{- if eq .Values.kind "Deployment" }}
|
||||
{{- if eq .Values.serviceMonitor.enabled true }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ template "descheduler.fullname" . }}-servicemonitor
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "descheduler.labels" . | nindent 4 }}
|
||||
spec:
|
||||
jobLabel: jobLabel
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "descheduler.selectorLabels" . | nindent 6 }}
|
||||
endpoints:
|
||||
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
|
||||
port: http-metrics
|
||||
{{- if .Values.serviceMonitor.interval }}
|
||||
interval: {{ .Values.serviceMonitor.interval }}
|
||||
{{- end }}
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
|
||||
insecureSkipVerify: true
|
||||
{{- end }}
|
||||
{{- if .Values.serviceMonitor.serverName }}
|
||||
serverName: {{ .Values.serviceMonitor.serverName }}
|
||||
{{- end}}
|
||||
{{- if .Values.serviceMonitor.metricRelabelings }}
|
||||
metricRelabelings:
|
||||
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- if .Values.serviceMonitor.relabelings }}
|
||||
relabelings:
|
||||
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -2,7 +2,6 @@ apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: descheduler-test-pod
|
||||
namespace: {{ .Release.Namespace }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
@@ -27,4 +26,4 @@ spec:
|
||||
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&
|
||||
chmod +x ./kubectl &&
|
||||
mv ./kubectl /usr/local/bin/kubectl &&
|
||||
/usr/local/bin/kubectl get pods --namespace {{ .Release.Namespace }} --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"
|
||||
/usr/local/bin/kubectl get pods --namespace kube-system --token "$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" | grep "descheduler" | grep "Completed"
|
||||
@@ -11,8 +11,7 @@ image:
|
||||
tag: ""
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
imagePullSecrets:
|
||||
# - name: container-registry-secret
|
||||
imagePullSecrets: []
|
||||
|
||||
resources:
|
||||
requests:
|
||||
@@ -38,32 +37,13 @@ suspend: false
|
||||
# Required when running as a Deployment
|
||||
deschedulingInterval: 5m
|
||||
|
||||
# Specifies the replica count for Deployment
|
||||
# Set leaderElection if you want to use more than 1 replica
|
||||
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
|
||||
# only if that node is in the same zone as at least one already-running descheduler
|
||||
replicas: 1
|
||||
|
||||
# Specifies whether Leader Election resources should be created
|
||||
# Required when running as a Deployment
|
||||
leaderElection: {}
|
||||
# enabled: true
|
||||
# leaseDuration: 15s
|
||||
# renewDeadline: 10s
|
||||
# retryPeriod: 2s
|
||||
# resourceLock: "leases"
|
||||
# resourceName: "descheduler"
|
||||
# resourceNamescape: "kube-system"
|
||||
|
||||
cmdOptions:
|
||||
v: 3
|
||||
# evict-local-storage-pods:
|
||||
# max-pods-to-evict-per-node: 10
|
||||
# node-selector: "key1=value1,key2=value2"
|
||||
|
||||
deschedulerPolicy:
|
||||
# nodeSelector: "key1=value1,key2=value2"
|
||||
# maxNoOfPodsToEvictPerNode: 10
|
||||
# maxNoOfPodsToEvictPerNamespace: 10
|
||||
# ignorePvcPods: true
|
||||
# evictLocalStoragePods: true
|
||||
strategies:
|
||||
RemoveDuplicates:
|
||||
enabled: true
|
||||
@@ -104,15 +84,7 @@ affinity: {}
|
||||
# values:
|
||||
# - e2e-az1
|
||||
# - e2e-az2
|
||||
# podAntiAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# - labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app.kubernetes.io/name
|
||||
# operator: In
|
||||
# values:
|
||||
# - descheduler
|
||||
# topologyKey: "kubernetes.io/hostname"
|
||||
|
||||
tolerations: []
|
||||
# - key: 'management'
|
||||
# operator: 'Equal'
|
||||
@@ -136,10 +108,6 @@ serviceAccount:
|
||||
# Specifies custom annotations for the serviceAccount
|
||||
annotations: {}
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
podLabels: {}
|
||||
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
@@ -149,25 +117,3 @@ livenessProbe:
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 10
|
||||
|
||||
service:
|
||||
enabled: false
|
||||
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
# The namespace where Prometheus expects to find service monitors.
|
||||
# namespace: ""
|
||||
interval: ""
|
||||
# honorLabels: true
|
||||
insecureSkipVerify: true
|
||||
serverName: null
|
||||
metricRelabelings: []
|
||||
# - action: keep
|
||||
# regex: 'descheduler_(build_info|pods_evicted)'
|
||||
# sourceLabels: [__name__]
|
||||
relabelings: []
|
||||
# - sourceLabels: [__meta_kubernetes_pod_node_name]
|
||||
# separator: ;
|
||||
# regex: ^(.*)$
|
||||
# targetLabel: nodename
|
||||
# replacement: $1
|
||||
# action: replace
|
||||
|
||||
@@ -19,15 +19,13 @@ package options
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
apiserveroptions "k8s.io/apiserver/pkg/server/options"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
componentbaseoptions "k8s.io/component-base/config/options"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig"
|
||||
"sigs.k8s.io/descheduler/pkg/apis/componentconfig/v1alpha1"
|
||||
deschedulerscheme "sigs.k8s.io/descheduler/pkg/descheduler/scheme"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -60,17 +58,7 @@ func NewDeschedulerServer() (*DeschedulerServer, error) {
|
||||
}
|
||||
|
||||
func newDefaultComponentConfig() (*componentconfig.DeschedulerConfiguration, error) {
|
||||
versionedCfg := v1alpha1.DeschedulerConfiguration{
|
||||
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
|
||||
LeaderElect: false,
|
||||
LeaseDuration: metav1.Duration{Duration: 137 * time.Second},
|
||||
RenewDeadline: metav1.Duration{Duration: 107 * time.Second},
|
||||
RetryPeriod: metav1.Duration{Duration: 26 * time.Second},
|
||||
ResourceLock: "leases",
|
||||
ResourceName: "descheduler",
|
||||
ResourceNamespace: "kube-system",
|
||||
},
|
||||
}
|
||||
versionedCfg := v1alpha1.DeschedulerConfiguration{}
|
||||
deschedulerscheme.Scheme.Default(&versionedCfg)
|
||||
cfg := componentconfig.DeschedulerConfiguration{}
|
||||
if err := deschedulerscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
|
||||
@@ -88,7 +76,5 @@ func (rs *DeschedulerServer) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.BoolVar(&rs.DryRun, "dry-run", rs.DryRun, "execute descheduler in dry run mode.")
|
||||
fs.BoolVar(&rs.DisableMetrics, "disable-metrics", rs.DisableMetrics, "Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.")
|
||||
|
||||
componentbaseoptions.BindLeaderElectionFlags(&rs.LeaderElection, fs)
|
||||
|
||||
rs.SecureServing.AddFlags(fs)
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
if factory == nil {
|
||||
klog.ClearLogger()
|
||||
} else {
|
||||
log, logrFlush := factory.Create(config.LoggingConfiguration{})
|
||||
log, logrFlush := factory.Create(config.FormatOptions{})
|
||||
defer logrFlush()
|
||||
klog.SetLogger(log)
|
||||
}
|
||||
@@ -81,7 +81,7 @@ func NewDeschedulerCommand(out io.Writer) *cobra.Command {
|
||||
|
||||
healthz.InstallHandler(pathRecorderMux, healthz.NamedCheck("Descheduler", healthz.PingHealthz.Check))
|
||||
|
||||
stoppedCh, _, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
stoppedCh, err := SecureServing.Serve(pathRecorderMux, 0, ctx.Done())
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to start secure server: %v", err)
|
||||
return
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
# Proposals
|
||||
This document walk you through about all the enhancements proposals for descheduler.
|
||||
|
||||
## Descheduler v1alpha2 Design Proposal
|
||||
```yaml
|
||||
title: Descheduler v1alpha2 Design Proposal
|
||||
authors:
|
||||
- "@damemi"
|
||||
link:
|
||||
- https://docs.google.com/document/d/1S1JCh-0F-QCJvBBG-kbmXiHAJFF8doArhDIAKbOj93I/edit#heading=h.imbp1ctnc8lx
|
||||
- https://github.com/kubernetes-sigs/descheduler/issues/679
|
||||
owning-sig: sig-scheduling
|
||||
creation-date: 2021-05-01
|
||||
status: implementable
|
||||
```
|
||||
|
||||
@@ -4,8 +4,6 @@ Starting with descheduler release v0.10.0 container images are available in the
|
||||
|
||||
Descheduler Version | Container Image | Architectures |
|
||||
------------------- |--------------------------------------------|-------------------------|
|
||||
v0.24.1 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.24.0 | k8s.gcr.io/descheduler/descheduler:v0.24.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.23.1 | k8s.gcr.io/descheduler/descheduler:v0.23.1 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.22.0 | k8s.gcr.io/descheduler/descheduler:v0.22.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
v0.21.0 | k8s.gcr.io/descheduler/descheduler:v0.21.0 | AMD64<br>ARM64<br>ARMv7 |
|
||||
@@ -37,52 +35,31 @@ Usage:
|
||||
descheduler [command]
|
||||
|
||||
Available Commands:
|
||||
completion generate the autocompletion script for the specified shell
|
||||
help Help about any command
|
||||
version Version of descheduler
|
||||
|
||||
Flags:
|
||||
--add-dir-header If true, adds the file directory to the header of the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--alsologtostderr log to standard error as well as files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--bind-address ip The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. (default 0.0.0.0)
|
||||
--cert-dir string The directory where the TLS certs are located. If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored. (default "apiserver.local.config/certificates")
|
||||
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
|
||||
--disable-metrics Disables metrics. The metrics are by default served through https://localhost:10258/metrics. Secure address, resp. port can be changed through --bind-address, resp. --secure-port flags.
|
||||
--dry-run execute descheduler in dry run mode.
|
||||
-h, --help help for descheduler
|
||||
--http2-max-streams-per-connection int The limit that the server gives to clients for the maximum number of streams in an HTTP/2 connection. Zero means to use golang's default.
|
||||
--kubeconfig string File with kube configuration.
|
||||
--leader-elect Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.
|
||||
--leader-elect-lease-duration duration The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled. (default 15s)
|
||||
--leader-elect-renew-deadline duration The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled. (default 10s)
|
||||
--leader-elect-resource-lock string The type of resource object that is used for locking during leader election. Supported options are 'endpoints', 'configmaps', 'leases', 'endpointsleases' and 'configmapsleases'. (default "leases")
|
||||
--leader-elect-resource-name string The name of resource object that is used for locking during leader election. (default "descheduler")
|
||||
--leader-elect-resource-namespace string The namespace of resource object that is used for locking during leader election. (default "kube-system")
|
||||
--leader-elect-retry-period duration The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled. (default 2s)
|
||||
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log-dir string If non-empty, write log files in this directory (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log-file string If non-empty, use this log file (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
|
||||
--logging-format string Sets the log format. Permitted formats: "text", "json". Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --skip-headers, --skip-log-headers, --stderrthreshold, --log-flush-frequency.\nNon-default choices are currently alpha and subject to change without warning. (default "text")
|
||||
--logtostderr log to standard error instead of files (default true) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--one-output If true, only write logs to their native severity level (vs also writing to each lower severity level) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--permit-address-sharing If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]
|
||||
--permit-port-sharing If true, SO_REUSEPORT will be used when binding the port, which allows more than one instance to bind on the same address and port. [default=false]
|
||||
--policy-config-file string File with descheduler policy configuration.
|
||||
--secure-port int The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. (default 10258)
|
||||
--skip-headers If true, avoid header prefixes in the log messages (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--skip-log-headers If true, avoid headers when opening log files (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2) (DEPRECATED: will be removed in a future release, see https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components)
|
||||
--tls-cert-file string File containing the default x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory specified by --cert-dir.
|
||||
--tls-cipher-suites strings Comma-separated list of cipher suites for the server. If omitted, the default Go cipher suites will be used.
|
||||
Preferred values: TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256, TLS_RSA_WITH_AES_256_CBC_SHA, TLS_RSA_WITH_AES_256_GCM_SHA384.
|
||||
Insecure values: TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_3DES_EDE_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256, TLS_RSA_WITH_RC4_128_SHA.
|
||||
--tls-min-version string Minimum TLS version supported. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13
|
||||
--tls-private-key-file string File containing the default x509 private key matching --tls-cert-file.
|
||||
--tls-sni-cert-key namedCertKey A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". (default [])
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
--add-dir-header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--descheduling-interval duration Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.
|
||||
--dry-run execute descheduler in dry run mode.
|
||||
--evict-local-storage-pods DEPRECATED: enables evicting pods using local storage by descheduler
|
||||
-h, --help help for descheduler
|
||||
--kubeconfig string File with kube configuration.
|
||||
--log-backtrace-at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log-dir string If non-empty, write log files in this directory
|
||||
--log-file string If non-empty, use this log file
|
||||
--log-file-max-size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--log-flush-frequency duration Maximum number of seconds between log flushes (default 5s)
|
||||
--logtostderr log to standard error instead of files (default true)
|
||||
--max-pods-to-evict-per-node int DEPRECATED: limits the maximum number of pods to be evicted per node by descheduler
|
||||
--node-selector string DEPRECATED: selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)
|
||||
--policy-config-file string File with descheduler policy configuration.
|
||||
--skip-headers If true, avoid header prefixes in the log messages
|
||||
--skip-log-headers If true, avoid headers when opening log files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
|
||||
Use "descheduler [command] --help" for more information about a command.
|
||||
```
|
||||
@@ -113,8 +90,7 @@ strategies:
|
||||
"PodLifeTime":
|
||||
enabled: true
|
||||
params:
|
||||
podLifeTime:
|
||||
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
|
||||
maxPodLifeTimeSeconds: 604800 # pods run for a maximum of 7 days
|
||||
```
|
||||
|
||||
### Balance Cluster By Node Memory Utilization
|
||||
@@ -142,7 +118,7 @@ strategies:
|
||||
|
||||
#### Balance low utilization nodes
|
||||
Using `HighNodeUtilization`, descheduler will rebalance the cluster based on memory by evicting pods
|
||||
from nodes with memory utilization lower than 20%. This should be use `NodeResourcesFit` with the `MostAllocated` scoring strategy based on these [doc](https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins).
|
||||
from nodes with memory utilization lower than 20%. This should be used along with scheduler strategy `MostRequestedPriority`.
|
||||
The evicted pods will be compacted into minimal set of nodes.
|
||||
|
||||
```
|
||||
@@ -161,14 +137,7 @@ strategies:
|
||||
Descheduler's `RemovePodsViolatingNodeTaints` strategy can be combined with
|
||||
[Node Problem Detector](https://github.com/kubernetes/node-problem-detector/) and
|
||||
[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to automatically remove
|
||||
Nodes which have problems. Node Problem Detector can detect specific Node problems and report them to the API server.
|
||||
There is a feature called TaintNodeByCondition of the node controller that takes some conditions and turns them into taints. Currently, this only works for the default node conditions: PIDPressure, MemoryPressure, DiskPressure, Ready, and some cloud provider specific conditions.
|
||||
The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
|
||||
Nodes which have problems. Node Problem Detector can detect specific Node problems and taint any Nodes which have those
|
||||
problems. The Descheduler will then deschedule workloads from those Nodes. Finally, if the descheduled Node's resource
|
||||
allocation falls below the Cluster Autoscaler's scale down threshold, the Node will become a scale down candidate
|
||||
and can be removed by Cluster Autoscaler. These three components form an autohealing cycle for Node problems.
|
||||
---
|
||||
**NOTE**
|
||||
|
||||
Once [kubernetes/node-problem-detector#565](https://github.com/kubernetes/node-problem-detector/pull/565) is available in NPD, we need to update this section.
|
||||
|
||||
---
|
||||
|
||||
68
go.mod
68
go.mod
@@ -1,19 +1,19 @@
|
||||
module sigs.k8s.io/descheduler
|
||||
|
||||
go 1.18
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/client9/misspell v0.3.4
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/spf13/cobra v1.2.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
k8s.io/api v0.24.0
|
||||
k8s.io/apimachinery v0.24.0
|
||||
k8s.io/apiserver v0.24.0
|
||||
k8s.io/client-go v0.24.0
|
||||
k8s.io/code-generator v0.24.0
|
||||
k8s.io/component-base v0.24.0
|
||||
k8s.io/component-helpers v0.24.0
|
||||
k8s.io/klog/v2 v2.60.1
|
||||
k8s.io/api v0.23.0
|
||||
k8s.io/apimachinery v0.23.0
|
||||
k8s.io/apiserver v0.23.0
|
||||
k8s.io/client-go v0.23.0
|
||||
k8s.io/code-generator v0.23.0
|
||||
k8s.io/component-base v0.23.0
|
||||
k8s.io/component-helpers v0.23.0
|
||||
k8s.io/klog/v2 v2.30.0
|
||||
k8s.io/kubectl v0.20.5
|
||||
sigs.k8s.io/mdtoc v1.0.1
|
||||
)
|
||||
@@ -31,8 +31,8 @@ require (
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/coreos/go-semver v0.3.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
@@ -50,10 +50,10 @@ require (
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/gomarkdown/markdown v0.0.0-20200824053859-8c8b3816f167 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.5 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect
@@ -67,13 +67,13 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
go.etcd.io/etcd/api/v3 v3.5.1 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.1 // indirect
go.etcd.io/etcd/client/v3 v3.5.1 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.0 // indirect
go.etcd.io/etcd/client/v3 v3.5.0 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
@@ -88,30 +88,30 @@ require (
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898 // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)
go.sum — 155 changed lines; the checksum entries track the go.mod dependency versions above.
@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

GO_VERSION=($(go version))

if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
  exit 1
fi

@@ -23,7 +23,7 @@ DESCHEDULER_ROOT=$(dirname "${BASH_SOURCE}")/..

GO_VERSION=($(go version))

if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.17|go1.18') ]]; then
if [[ -z $(echo "${GO_VERSION[2]}" | grep -E 'go1.14|go1.15|go1.16|go1.17') ]]; then
  echo "Unknown go version '${GO_VERSION[2]}', skipping gofmt."
  exit 1
fi

@@ -22,13 +22,6 @@ rules:
- apiGroups: ["scheduling.k8s.io"]
  resources: ["priorityclasses"]
  verbs: ["get", "watch", "list"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
  resources: ["leases"]
  resourceNames: ["descheduler"]
  verbs: ["get", "patch", "delete"]
---
apiVersion: v1
kind: ServiceAccount
@@ -48,3 +41,4 @@ subjects:
- name: descheduler-sa
  kind: ServiceAccount
  namespace: kube-system

@@ -16,7 +16,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
  image: k8s.gcr.io/descheduler/descheduler:v0.24.1
  image: k8s.gcr.io/descheduler/descheduler:v0.23.1
  volumeMounts:
  - mountPath: /policy-dir
    name: policy-volume

@@ -19,7 +19,7 @@ spec:
serviceAccountName: descheduler-sa
containers:
- name: descheduler
  image: k8s.gcr.io/descheduler/descheduler:v0.24.1
  image: k8s.gcr.io/descheduler/descheduler:v0.23.1
  imagePullPolicy: IfNotPresent
  command:
  - "/bin/descheduler"

@@ -14,7 +14,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: descheduler
  image: k8s.gcr.io/descheduler/descheduler:v0.24.1
  image: k8s.gcr.io/descheduler/descheduler:v0.23.1
  volumeMounts:
  - mountPath: /policy-dir
    name: policy-volume

@@ -88,18 +88,15 @@ type StrategyParameters struct {
  ThresholdPriorityClassName string
  LabelSelector *metav1.LabelSelector
  NodeFit bool
  IncludePreferNoSchedule bool
  ExcludedTaints []string
}

type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage

type NodeResourceUtilizationThresholds struct {
  UseDeviationThresholds bool
  Thresholds ResourceThresholds
  TargetThresholds ResourceThresholds
  NumberOfNodes int
  Thresholds ResourceThresholds
  TargetThresholds ResourceThresholds
  NumberOfNodes int
}

type PodsHavingTooManyRestarts struct {

@@ -86,18 +86,15 @@ type StrategyParameters struct {
  ThresholdPriorityClassName string `json:"thresholdPriorityClassName"`
  LabelSelector *metav1.LabelSelector `json:"labelSelector"`
  NodeFit bool `json:"nodeFit"`
  IncludePreferNoSchedule bool `json:"includePreferNoSchedule"`
  ExcludedTaints []string `json:"excludedTaints,omitempty"`
}

type Percentage float64
type ResourceThresholds map[v1.ResourceName]Percentage

type NodeResourceUtilizationThresholds struct {
  UseDeviationThresholds bool `json:"useDeviationThresholds,omitempty"`
  Thresholds ResourceThresholds `json:"thresholds,omitempty"`
  TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
  NumberOfNodes int `json:"numberOfNodes,omitempty"`
  Thresholds ResourceThresholds `json:"thresholds,omitempty"`
  TargetThresholds ResourceThresholds `json:"targetThresholds,omitempty"`
  NumberOfNodes int `json:"numberOfNodes,omitempty"`
}

type PodsHavingTooManyRestarts struct {
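For context, the `NodeResourceUtilizationThresholds` fields above are what a `DeschedulerPolicy` sets for the `LowNodeUtilization` strategy. A minimal illustrative policy sketch follows; the threshold numbers are examples, not defaults:

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "LowNodeUtilization":
    enabled: true
    params:
      nodeResourceUtilizationThresholds:
        # nodes below all of these percentages are considered underutilized
        thresholds:
          "cpu": 20
          "memory": 20
          "pods": 20
        # nodes above any of these percentages are candidates to evict from
        targetThresholds:
          "cpu": 50
          "memory": 50
          "pods": 50
```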
@@ -261,7 +261,6 @@ func Convert_api_Namespaces_To_v1alpha1_Namespaces(in *api.Namespaces, out *Name
}

func autoConvert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtilizationThresholds(in *NodeResourceUtilizationThresholds, out *api.NodeResourceUtilizationThresholds, s conversion.Scope) error {
  out.UseDeviationThresholds = in.UseDeviationThresholds
  out.Thresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
  out.TargetThresholds = *(*api.ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
  out.NumberOfNodes = in.NumberOfNodes
@@ -274,7 +273,6 @@ func Convert_v1alpha1_NodeResourceUtilizationThresholds_To_api_NodeResourceUtili
}

func autoConvert_api_NodeResourceUtilizationThresholds_To_v1alpha1_NodeResourceUtilizationThresholds(in *api.NodeResourceUtilizationThresholds, out *NodeResourceUtilizationThresholds, s conversion.Scope) error {
  out.UseDeviationThresholds = in.UseDeviationThresholds
  out.Thresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.Thresholds))
  out.TargetThresholds = *(*ResourceThresholds)(unsafe.Pointer(&in.TargetThresholds))
  out.NumberOfNodes = in.NumberOfNodes
@@ -363,8 +361,6 @@ func autoConvert_v1alpha1_StrategyParameters_To_api_StrategyParameters(in *Strat
  out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
  out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
  out.NodeFit = in.NodeFit
  out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
  out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
  return nil
}

@@ -386,8 +382,6 @@ func autoConvert_api_StrategyParameters_To_v1alpha1_StrategyParameters(in *api.S
  out.ThresholdPriorityClassName = in.ThresholdPriorityClassName
  out.LabelSelector = (*v1.LabelSelector)(unsafe.Pointer(in.LabelSelector))
  out.NodeFit = in.NodeFit
  out.IncludePreferNoSchedule = in.IncludePreferNoSchedule
  out.ExcludedTaints = *(*[]string)(unsafe.Pointer(&in.ExcludedTaints))
  return nil
}

@@ -356,11 +356,6 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
    *out = new(v1.LabelSelector)
    (*in).DeepCopyInto(*out)
  }
  if in.ExcludedTaints != nil {
    in, out := &in.ExcludedTaints, &out.ExcludedTaints
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  return
}

@@ -356,11 +356,6 @@ func (in *StrategyParameters) DeepCopyInto(out *StrategyParameters) {
    *out = new(v1.LabelSelector)
    (*in).DeepCopyInto(*out)
  }
  if in.ExcludedTaints != nil {
    in, out := &in.ExcludedTaints, &out.ExcludedTaints
    *out = make([]string, len(*in))
    copy(*out, *in)
  }
  return
}

@@ -53,9 +53,6 @@ type DeschedulerConfiguration struct {
  // IgnorePVCPods sets whether PVC pods should be allowed to be evicted
  IgnorePVCPods bool

  // LeaderElection starts Deployment using leader election loop
  LeaderElection componentbaseconfig.LeaderElectionConfiguration

  // Logging specifies the options of logging.
  // Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
  Logging componentbaseconfig.LoggingConfiguration

@@ -54,9 +54,6 @@ type DeschedulerConfiguration struct {
  // IgnorePVCPods sets whether PVC pods should be allowed to be evicted
  IgnorePVCPods bool `json:"ignorePvcPods,omitempty"`

  // LeaderElection starts Deployment using leader election loop
  LeaderElection componentbaseconfig.LeaderElectionConfiguration `json:"leaderElection,omitempty"`

  // Logging specifies the options of logging.
  // Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information.
  Logging componentbaseconfig.LoggingConfiguration `json:"logging,omitempty"`

@@ -58,7 +58,6 @@ func autoConvert_v1alpha1_DeschedulerConfiguration_To_componentconfig_Deschedule
  out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
  out.EvictLocalStoragePods = in.EvictLocalStoragePods
  out.IgnorePVCPods = in.IgnorePVCPods
  out.LeaderElection = in.LeaderElection
  out.Logging = in.Logging
  return nil
}
@@ -77,7 +76,6 @@ func autoConvert_componentconfig_DeschedulerConfiguration_To_v1alpha1_Deschedule
  out.MaxNoOfPodsToEvictPerNode = in.MaxNoOfPodsToEvictPerNode
  out.EvictLocalStoragePods = in.EvictLocalStoragePods
  out.IgnorePVCPods = in.IgnorePVCPods
  out.LeaderElection = in.LeaderElection
  out.Logging = in.Logging
  return nil
}

@@ -29,7 +29,6 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
  *out = *in
  out.TypeMeta = in.TypeMeta
  out.LeaderElection = in.LeaderElection
  in.Logging.DeepCopyInto(&out.Logging)
  return
}

@@ -29,7 +29,6 @@ import (
func (in *DeschedulerConfiguration) DeepCopyInto(out *DeschedulerConfiguration) {
  *out = *in
  out.TypeMeta = in.TypeMeta
  out.LeaderElection = in.LeaderElection
  in.Logging.DeepCopyInto(&out.Logging)
  return
}

@@ -69,22 +69,7 @@ func Run(ctx context.Context, rs *options.DeschedulerServer) error {
    return err
  }

  runFn := func() error {
    return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
  }

  if rs.LeaderElection.LeaderElect && rs.DeschedulingInterval.Seconds() == 0 {
    return fmt.Errorf("leaderElection must be used with deschedulingInterval")
  }

  if rs.LeaderElection.LeaderElect && !rs.DryRun {
    if err := NewLeaderElection(runFn, rsclient, &rs.LeaderElection, ctx); err != nil {
      return fmt.Errorf("leaderElection: %w", err)
    }
    return nil
  }

  return runFn()
  return RunDeschedulerStrategies(ctx, rs, deschedulerPolicy, evictionPolicyGroupVersion)
}

type strategyFunction func(ctx context.Context, client clientset.Interface, strategy api.DeschedulerStrategy, nodes []*v1.Node, podEvictor *evictions.PodEvictor, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc)
@@ -283,7 +268,6 @@ func RunDeschedulerStrategies(ctx context.Context, rs *options.DeschedulerServer
  deschedulerPolicy.MaxNoOfPodsToEvictPerNode,
  deschedulerPolicy.MaxNoOfPodsToEvictPerNamespace,
  nodes,
  getPodsAssignedToNode,
  evictLocalStoragePods,
  evictSystemCriticalPods,
  ignorePvcPods,

@@ -51,7 +51,6 @@ type namespacePodEvictCount map[string]uint
type PodEvictor struct {
  client clientset.Interface
  nodes []*v1.Node
  nodeIndexer podutil.GetPodsAssignedToNodeFunc
  policyGroupVersion string
  dryRun bool
  maxPodsToEvictPerNode *uint
@@ -72,7 +71,6 @@ func NewPodEvictor(
  maxPodsToEvictPerNode *uint,
  maxPodsToEvictPerNamespace *uint,
  nodes []*v1.Node,
  nodeIndexer podutil.GetPodsAssignedToNodeFunc,
  evictLocalStoragePods bool,
  evictSystemCriticalPods bool,
  ignorePvcPods bool,
@@ -89,7 +87,6 @@ func NewPodEvictor(
  return &PodEvictor{
    client: client,
    nodes: nodes,
    nodeIndexer: nodeIndexer,
    policyGroupVersion: policyGroupVersion,
    dryRun: dryRun,
    maxPodsToEvictPerNode: maxPodsToEvictPerNode,
@@ -158,9 +155,9 @@ func (pe *PodEvictor) EvictPod(ctx context.Context, pod *v1.Pod, node *v1.Node,
  }

  if pe.dryRun {
    klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
    klog.V(1).InfoS("Evicted pod in dry run mode", "pod", klog.KObj(pod), "reason", reason)
  } else {
    klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason, "strategy", strategy, "node", node.Name)
    klog.V(1).InfoS("Evicted pod", "pod", klog.KObj(pod), "reason", reason)
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartStructuredLogging(3)
    eventBroadcaster.StartRecordingToSink(&clientcorev1.EventSinkImpl{Interface: pe.client.CoreV1().Events(pod.Namespace)})
@@ -299,7 +296,7 @@ func (pe *PodEvictor) Evictable(opts ...func(opts *Options)) *evictable {
  }
  if options.nodeFit {
    ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
      if !nodeutil.PodFitsAnyOtherNode(pe.nodeIndexer, pod, pe.nodes) {
      if !nodeutil.PodFitsAnyOtherNode(pod, pe.nodes) {
        return fmt.Errorf("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable")
      }
      return nil
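The nodeFit constraint shown above is toggled per strategy through the `NodeFit` field of `StrategyParameters`. An illustrative strategy entry that enables it (the strategy name is only an example; any strategy that accepts `nodeFit` is configured the same way):

```
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "RemovePodsViolatingNodeTaints":
    enabled: true
    params:
      # only evict a pod if it fits on some other schedulable node
      nodeFit: true
```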
@@ -21,10 +21,8 @@ import (
  "testing"

  v1 "k8s.io/api/core/v1"
  policyv1 "k8s.io/api/policy/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/client-go/informers"
  "k8s.io/client-go/kubernetes/fake"
  core "k8s.io/client-go/testing"
  podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
@@ -82,9 +80,9 @@ func TestIsEvictable(t *testing.T) {
  nodeLabelKey := "datacenter"
  nodeLabelValue := "east"
  type testCase struct {
    description string
    pods []*v1.Pod
    pod *v1.Pod
    nodes []*v1.Node
    runBefore func(*v1.Pod, []*v1.Node)
    evictFailedBarePods bool
    evictLocalStoragePods bool
    evictSystemCriticalPods bool
@@ -94,309 +92,261 @@ func TestIsEvictable(t *testing.T) {
  }

  testCases := []testCase{
    {
      description: "Failed pod eviction with no ownerRefs",
      pods: []*v1.Pod{
        test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, func(pod *v1.Pod) {
          pod.Status.Phase = v1.PodFailed
        }),
    { // Failed pod eviction with no ownerRefs.
      pod: test.BuildTestPod("bare_pod_failed", 400, 0, n1.Name, nil),
      runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
        pod.Status.Phase = v1.PodFailed
      },
      evictFailedBarePods: false,
      result: false,
    }, {
      description: "Normal pod eviction with no ownerRefs and evictFailedBarePods enabled",
      pods: []*v1.Pod{test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil)},
    }, { // Normal pod eviction with no ownerRefs and evictFailedBarePods enabled
      pod: test.BuildTestPod("bare_pod", 400, 0, n1.Name, nil),
      runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
      },
      evictFailedBarePods: true,
      result: false,
    }, {
      description: "Failed pod eviction with no ownerRefs",
      pods: []*v1.Pod{
        test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, func(pod *v1.Pod) {
          pod.Status.Phase = v1.PodFailed
        }),
    }, { // Failed pod eviction with no ownerRefs
      pod: test.BuildTestPod("bare_pod_failed_but_can_be_evicted", 400, 0, n1.Name, nil),
      runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
        pod.Status.Phase = v1.PodFailed
      },
      evictFailedBarePods: true,
      result: true,
    }, {
      description: "Normal pod eviction with normal ownerRefs",
      pods: []*v1.Pod{
        test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
          pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
        }),
    }, { // Normal pod eviction with normal ownerRefs
      pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
      runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
        pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
      },
      evictLocalStoragePods: false,
      evictSystemCriticalPods: false,
      result: true,
    }, {
      description: "Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
      pods: []*v1.Pod{
        test.BuildTestPod("p2", 400, 0, n1.Name, func(pod *v1.Pod) {
          pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
          pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
        }),
    }, { // Normal pod eviction with normal ownerRefs and descheduler.alpha.kubernetes.io/evict annotation
      pod: test.BuildTestPod("p2", 400, 0, n1.Name, nil),
      runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
        pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
        pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
      },
      evictLocalStoragePods: false,
      evictSystemCriticalPods: false,
      result: true,
    }, {
      description: "Normal pod eviction with replicaSet ownerRefs",
      pods: []*v1.Pod{
        test.BuildTestPod("p3", 400, 0, n1.Name, func(pod *v1.Pod) {
          pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
        }),
    }, { // Normal pod eviction with replicaSet ownerRefs
      pod: test.BuildTestPod("p3", 400, 0, n1.Name, nil),
      runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
        pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
      },
      evictLocalStoragePods: false,
      evictSystemCriticalPods: false,
      result: true,
    }, {
      description: "Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p4", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
}),
|
||||
}, { // Normal pod eviction with replicaSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p4", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetReplicaSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Normal pod eviction with statefulSet ownerRefs",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p18", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
}, { // Normal pod eviction with statefulSet ownerRefs
|
||||
pod: test.BuildTestPod("p18", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p19", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||
}),
|
||||
}, { // Normal pod eviction with statefulSet ownerRefs and descheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p19", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetStatefulSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted because it is bound to a PV and evictLocalStoragePods = false",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p5", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
}, { // Pod not evicted because it is bound to a PV and evictLocalStoragePods = false
|
||||
pod: test.BuildTestPod("p5", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p6", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
}, { // Pod is evicted because it is bound to a PV and evictLocalStoragePods = true
|
||||
pod: test.BuildTestPod("p6", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: true,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p7", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
}, { // Pod is evicted because it is bound to a PV and evictLocalStoragePods = false, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p7", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is part of a daemonSet",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p8", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
}),
|
||||
}, { // Pod not evicted because it is part of a daemonSet
|
||||
pod: test.BuildTestPod("p8", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p9", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
}),
|
||||
}, { // Pod is evicted because it is part of a daemonSet, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p9", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.ObjectMeta.OwnerReferences = test.GetDaemonSetOwnerRefList()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it is a mirror poddsa",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p10", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
}, { // Pod not evicted because it is a mirror pod
|
||||
pod: test.BuildTestPod("p10", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p11", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
}),
|
||||
}, { // Pod is evicted because it is a mirror pod, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p11", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
pod.Annotations["descheduler.alpha.kubernetes.io/evict"] = "true"
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has system critical priority",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p12", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
}, { // Pod not evicted because it has system critical priority
|
||||
pod: test.BuildTestPod("p12", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p13", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||
}
|
||||
}),
|
||||
}, { // Pod is evicted because it has system critical priority, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p13", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
pod.Annotations = map[string]string{
|
||||
"descheduler.alpha.kubernetes.io/evict": "true",
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod not evicted becasuse it has a priority higher than the configured priority threshold",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p14", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
}, { // Pod not evicted because it has a priority higher than the configured priority threshold
|
||||
pod: test.BuildTestPod("p14", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p15", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
}, { // Pod is evicted because it has a priority higher than the configured priority threshold, but it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p15", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
}, { // Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true
|
||||
pod: test.BuildTestPod("p16", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p16", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
}, { // Pod is evicted because it has system critical priority, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p16", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
}, { // Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true
|
||||
pod: test.BuildTestPod("p17", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod is evicted becasuse it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p17", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
}),
|
||||
}, { // Pod is evicted because it has a priority higher than the configured priority threshold, but evictSystemCriticalPods = true and it has scheduler.alpha.kubernetes.io/evict annotation
|
||||
pod: test.BuildTestPod("p17", 400, 0, n1.Name, nil),
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
|
||||
pod.Spec.Priority = &highPriority
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
priorityThreshold: &lowPriority,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with no tolerations running on normal node, all other nodes tainted",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||
}, { // Pod with no tolerations running on normal node, all other nodes tainted
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, nil),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
@@ -404,8 +354,27 @@ func TestIsEvictable(t *testing.T) {
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
}, { // Pod with correct tolerations running on normal node, all other nodes tainted
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
@@ -413,259 +382,81 @@ func TestIsEvictable(t *testing.T) {
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod with correct tolerations running on normal node, all other nodes tainted",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.Tolerations = []v1.Toleration{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with incorrect node selector",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: "fail",
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||
}, { // Pod with incorrect node selector
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: "fail",
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod with correct node selector",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 1000, 2000, 13, func(node *v1.Node) {
|
||||
}, { // Pod with correct node selector
|
||||
pod: test.BuildTestPod("p1", 400, 0, n1.Name, func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node2", 1000, 2000, 13, nil), test.BuildTestNode("node3", 1000, 2000, 13, nil)},
|
||||
runBefore: func(pod *v1.Pod, nodes []*v1.Node) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
|
||||
for _, node := range nodes {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 1000, 2000, 13, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
}
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with correct node selector, but only available node doesn't have enough CPU",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2-TEST", 10, 16, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3-TEST", 10, 16, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
}, {
|
||||
description: "Pod with correct node selector, and one node has enough memory",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("node2-pod-10GB-mem", 20, 10, "node2", func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.Labels = map[string]string{
|
||||
"test": "true",
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("node3-pod-10GB-mem", 20, 10, "node3", func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.Labels = map[string]string{
|
||||
"test": "true",
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 100, 20, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: true,
|
||||
}, {
|
||||
description: "Pod with correct node selector, but both nodes don't have enough memory",
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 12, 8, n1.Name, func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("node2-pod-10GB-mem", 10, 10, "node2", func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.Labels = map[string]string{
|
||||
"test": "true",
|
||||
}
|
||||
}),
|
||||
test.BuildTestPod("node3-pod-10GB-mem", 10, 10, "node3", func(pod *v1.Pod) {
|
||||
pod.ObjectMeta.Labels = map[string]string{
|
||||
"test": "true",
|
||||
}
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node2", 100, 16, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode("node3", 100, 16, 10, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
nodeFit: true,
|
||||
result: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test.runBefore(test.pod, test.nodes)
|
||||
nodes := append(test.nodes, n1)
|
||||
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
podEvictor := &PodEvictor{
|
||||
evictLocalStoragePods: test.evictLocalStoragePods,
|
||||
evictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||
evictFailedBarePods: test.evictFailedBarePods,
|
||||
nodes: nodes,
|
||||
}
|
||||
|
||||
nodes := append(test.nodes, n1)
|
||||
evictable := podEvictor.Evictable()
|
||||
var opts []func(opts *Options)
|
||||
if test.priorityThreshold != nil {
|
||||
opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
|
||||
}
|
||||
if test.nodeFit {
|
||||
opts = append(opts, WithNodeFit(true))
|
||||
}
|
||||
evictable = podEvictor.Evictable(opts...)
|
||||
|
||||
var objs []runtime.Object
|
||||
for _, node := range test.nodes {
|
||||
objs = append(objs, node)
|
||||
}
|
||||
for _, pod := range test.pods {
|
||||
objs = append(objs, pod)
|
||||
}
|
||||
result := evictable.IsEvictable(test.pod)
|
||||
if result != test.result {
|
||||
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pod.Name, test.result, result)
|
||||
}
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
podEvictor := &PodEvictor{
|
||||
client: fakeClient,
|
||||
nodes: nodes,
|
||||
nodeIndexer: getPodsAssignedToNode,
|
||||
policyGroupVersion: policyv1.SchemeGroupVersion.String(),
|
||||
dryRun: false,
|
||||
maxPodsToEvictPerNode: nil,
|
||||
maxPodsToEvictPerNamespace: nil,
|
||||
evictLocalStoragePods: test.evictLocalStoragePods,
|
||||
evictSystemCriticalPods: test.evictSystemCriticalPods,
|
||||
evictFailedBarePods: test.evictFailedBarePods,
|
||||
}
|
||||
|
||||
var opts []func(opts *Options)
|
||||
if test.priorityThreshold != nil {
|
||||
opts = append(opts, WithPriorityThreshold(*test.priorityThreshold))
|
||||
}
|
||||
if test.nodeFit {
|
||||
opts = append(opts, WithNodeFit(true))
|
||||
}
|
||||
evictable := podEvictor.Evictable(opts...)
|
||||
|
||||
result := evictable.IsEvictable(test.pods[0])
|
||||
if result != test.result {
|
||||
t.Errorf("IsEvictable should return for pod %s %t, but it returns %t", test.pods[0].Name, test.result, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestPodTypes(t *testing.T) {
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package descheduler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/leaderelection"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
componentbaseconfig "k8s.io/component-base/config"
|
||||
"k8s.io/klog/v2"
|
||||
"os"
|
||||
)
|
||||
|
||||
// NewLeaderElection starts the leader election code loop
|
||||
func NewLeaderElection(
|
||||
run func() error,
|
||||
client clientset.Interface,
|
||||
LeaderElectionConfig *componentbaseconfig.LeaderElectionConfiguration,
|
||||
ctx context.Context,
|
||||
) error {
|
||||
var id string
|
||||
|
||||
if hostname, err := os.Hostname(); err != nil {
|
||||
// on errors, make sure we're unique
|
||||
id = string(uuid.NewUUID())
|
||||
} else {
|
||||
// add a uniquifier so that two processes on the same host don't accidentally both become active
|
||||
id = hostname + "_" + string(uuid.NewUUID())
|
||||
}
|
||||
|
||||
klog.V(3).Infof("Assigned unique lease holder id: %s", id)
|
||||
|
||||
if len(LeaderElectionConfig.ResourceNamespace) == 0 {
|
||||
return fmt.Errorf("namespace may not be empty")
|
||||
}
|
||||
|
||||
if len(LeaderElectionConfig.ResourceName) == 0 {
|
||||
return fmt.Errorf("name may not be empty")
|
||||
}
|
||||
|
||||
lock, err := resourcelock.New(
|
||||
LeaderElectionConfig.ResourceLock,
|
||||
LeaderElectionConfig.ResourceNamespace,
|
||||
LeaderElectionConfig.ResourceName,
|
||||
client.CoreV1(),
|
||||
client.CoordinationV1(),
|
||||
resourcelock.ResourceLockConfig{
|
||||
Identity: id,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create leader election lock: %v", err)
|
||||
}
|
||||
|
||||
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
|
||||
Lock: lock,
|
||||
ReleaseOnCancel: true,
|
||||
LeaseDuration: LeaderElectionConfig.LeaseDuration.Duration,
|
||||
RenewDeadline: LeaderElectionConfig.RenewDeadline.Duration,
|
||||
RetryPeriod: LeaderElectionConfig.RetryPeriod.Duration,
|
||||
Callbacks: leaderelection.LeaderCallbacks{
|
||||
OnStartedLeading: func(ctx context.Context) {
|
||||
klog.V(1).InfoS("Started leading")
|
||||
err := run()
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
},
|
||||
OnStoppedLeading: func() {
|
||||
klog.V(1).InfoS("Leader lost")
|
||||
},
|
||||
OnNewLeader: func(identity string) {
|
||||
// Just got the lock
|
||||
if identity == id {
|
||||
return
|
||||
}
|
||||
klog.V(1).Infof("New leader elected: %v", identity)
|
||||
},
|
||||
},
|
||||
})
|
||||
return nil
|
||||
}
|
||||
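The file above exists only on the newer side of this comparison; the release branch has no leader-election helper. A hedged usage sketch of the `NewLeaderElection` signature shown in the removed file, assuming the package lives at `sigs.k8s.io/descheduler/pkg/descheduler`; the lease durations, names, and the `runStrategies` callback are illustrative values, not taken from the repository:

```go
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	componentbaseconfig "k8s.io/component-base/config"

	"sigs.k8s.io/descheduler/pkg/descheduler"
)

// runWithLeaderElection is illustrative only; it wraps a run function so that
// only the current lease holder executes the descheduling strategies.
func runWithLeaderElection(ctx context.Context, client clientset.Interface, runStrategies func() error) error {
	leaderConfig := &componentbaseconfig.LeaderElectionConfiguration{
		LeaderElect:       true,
		ResourceLock:      resourcelock.LeasesResourceLock,
		ResourceName:      "descheduler",   // assumed lock name
		ResourceNamespace: "kube-system",   // assumed lock namespace
		LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
		RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
		RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
	}
	// Followers block inside RunOrDie until they acquire the Lease or the
	// context is cancelled; the leader invokes runStrategies.
	return descheduler.NewLeaderElection(runStrategies, client, leaderConfig, ctx)
}
```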
@@ -18,16 +18,13 @@ package node
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -99,90 +96,32 @@ func IsReady(node *v1.Node) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeFit returns true if the provided pod can be scheduled onto the provided node.
|
||||
// This function is used when the NodeFit pod filtering feature of the Descheduler is enabled.
|
||||
// This function currently considers a subset of the Kubernetes Scheduler's predicates when
|
||||
// deciding if a pod would fit on a node, but more predicates may be added in the future.
|
||||
func NodeFit(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) []error {
|
||||
// Check node selector and required affinity
|
||||
var errors []error
|
||||
if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil {
|
||||
errors = append(errors, err)
|
||||
} else if !ok {
|
||||
errors = append(errors, fmt.Errorf("pod node selector does not match the node label"))
|
||||
}
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
ok := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
if !ok {
|
||||
errors = append(errors, fmt.Errorf("pod does not tolerate taints on the node"))
|
||||
}
|
||||
// Check if the pod can fit on a node based on its requests
|
||||
ok, reqErrors := fitsRequest(nodeIndexer, pod, node)
|
||||
if !ok {
|
||||
errors = append(errors, reqErrors...)
|
||||
}
|
||||
// Check if node is schedulable
|
||||
if IsNodeUnschedulable(node) {
|
||||
errors = append(errors, fmt.Errorf("node is not schedulable"))
|
||||
}
|
||||
// PodFitsAnyOtherNode checks if the given pod fits any of the given nodes, besides the node
|
||||
// the pod is already running on. The node fit is based on multiple criteria, such as the pod node selector
|
||||
// matching the node label (including affinity), the taints on the node, and the node being schedulable or not.
|
||||
func PodFitsAnyOtherNode(pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// PodFitsAnyOtherNode checks if the given pod will fit any of the given nodes, besides the node
|
||||
// the pod is already running on. The predicates used to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsAnyOtherNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
// Skip node pod is already on
|
||||
if node.Name == pod.Spec.NodeName {
|
||||
continue
|
||||
}
|
||||
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
// Check node selector and required affinity
|
||||
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||
if err != nil || !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsAnyNode checks if the given pod will fit any of the given nodes. The predicates used
|
||||
// to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsAnyNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
}
|
||||
// Check taints (we only care about NoSchedule and NoExecute taints)
|
||||
ok = utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
})
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsCurrentNode checks if the given pod will fit onto the given node. The predicates used
|
||||
// to determine if the pod will fit can be found in the NodeFit function.
|
||||
func PodFitsCurrentNode(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) bool {
|
||||
errors := NodeFit(nodeIndexer, pod, node)
|
||||
if len(errors) == 0 {
|
||||
klog.V(4).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
} else {
|
||||
klog.V(4).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
for _, err := range errors {
|
||||
klog.V(4).InfoS(err.Error())
|
||||
// Check if node is schedulable
|
||||
if !IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
@@ -194,95 +133,39 @@ func IsNodeUnschedulable(node *v1.Node) bool {
|
||||
return node.Spec.Unschedulable
|
||||
}
|
||||
|
||||
// fitsRequest determines if a pod can fit on a node based on its resource requests. It returns true if
|
||||
// the pod will fit.
|
||||
func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, node *v1.Node) (bool, []error) {
|
||||
var insufficientResources []error
|
||||
// PodFitsAnyNode checks if the given pod fits any of the given nodes, based on
|
||||
// multiple criteria, such as the pod node selector matching the node label and the node
|
||||
// being schedulable or not.
|
||||
func PodFitsAnyNode(pod *v1.Pod, nodes []*v1.Node) bool {
|
||||
for _, node := range nodes {
|
||||
|
||||
// Get pod requests
|
||||
podRequests, _ := utils.PodRequestsAndLimits(pod)
|
||||
resourceNames := make([]v1.ResourceName, 0, len(podRequests))
|
||||
for name := range podRequests {
|
||||
resourceNames = append(resourceNames, name)
|
||||
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||
if err != nil || !ok {
|
||||
continue
|
||||
}
|
||||
if !IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Pod can possibly be scheduled on a different node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PodFitsCurrentNode checks if the given pod fits on the given node if the pod
|
||||
// node selector matches the node label.
|
||||
func PodFitsCurrentNode(pod *v1.Pod, node *v1.Node) bool {
|
||||
ok, err := utils.PodMatchNodeSelector(pod, node)
|
||||
|
||||
availableResources, err := nodeAvailableResources(nodeIndexer, node, resourceNames)
|
||||
if err != nil {
|
||||
return false, []error{err}
|
||||
}
|
||||
|
||||
podFitsOnNode := true
|
||||
for _, resource := range resourceNames {
|
||||
podResourceRequest := podRequests[resource]
|
||||
availableResource, ok := availableResources[resource]
|
||||
if !ok || podResourceRequest.MilliValue() > availableResource.MilliValue() {
|
||||
insufficientResources = append(insufficientResources, fmt.Errorf("insufficient %v", resource))
|
||||
podFitsOnNode = false
|
||||
}
|
||||
}
|
||||
return podFitsOnNode, insufficientResources
|
||||
}
|
||||
|
||||
// nodeAvailableResources returns resources mapped to the quantity available on the node.
|
||||
func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName) (map[v1.ResourceName]*resource.Quantity, error) {
|
||||
podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeUtilization := NodeUtilization(podsOnNode, resourceNames)
|
||||
remainingResources := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
if _, exists := node.Status.Allocatable[name]; exists {
|
||||
allocatableResource := node.Status.Allocatable[name]
|
||||
remainingResources[name] = resource.NewQuantity(allocatableResource.Value()-nodeUtilization[name].Value(), resource.DecimalSI)
|
||||
} else {
|
||||
remainingResources[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return remainingResources, nil
|
||||
}
|
||||
|
||||
// NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
|
||||
func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
if !IsBasicResource(name) {
|
||||
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
for _, name := range resourceNames {
|
||||
quantity, ok := req[name]
|
||||
if ok && name != v1.ResourcePods {
|
||||
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
|
||||
// the format of the quantity will be updated to the format of y.
|
||||
totalReqs[name].Add(quantity)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return totalReqs
|
||||
}
|
||||
|
||||
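The remaining-capacity arithmetic in `nodeAvailableResources` and `NodeUtilization` above is easy to check by hand. A toy example with made-up numbers (not taken from the repository) showing the per-resource comparison that `fitsRequest` performs:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Made-up numbers: 2000m CPU allocatable and existing pods already
	// requesting 1200m leave 800m on the node.
	allocatable := resource.NewMilliQuantity(2000, resource.DecimalSI)
	requested := resource.NewMilliQuantity(1200, resource.DecimalSI)
	remaining := resource.NewMilliQuantity(allocatable.MilliValue()-requested.MilliValue(), resource.DecimalSI)

	// A candidate pod asking for 1000m does not fit (1000m > 800m), which is
	// the same MilliValue comparison fitsRequest makes for each resource name.
	podRequest := resource.NewMilliQuantity(1000, resource.DecimalSI)
	fmt.Println(podRequest.MilliValue() <= remaining.MilliValue()) // false
}
```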
// IsBasicResource checks if resource is basic native.
|
||||
func IsBasicResource(name v1.ResourceName) bool {
|
||||
switch name {
|
||||
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
|
||||
return true
|
||||
default:
|
||||
klog.ErrorS(err, "Failed to match node selector")
|
||||
return false
|
||||
}
|
||||
|
||||
if !ok {
|
||||
klog.V(2).InfoS("Pod does not fit on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return false
|
||||
}
|
||||
|
||||
klog.V(2).InfoS("Pod fits on node", "pod", klog.KObj(pod), "node", klog.KObj(node))
|
||||
return true
|
||||
}
|
||||
|
||||
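The interleaved hunks above are hard to read side by side: on the release branch the fit check is reduced to the node selector, NoSchedule/NoExecute taints, and the Unschedulable flag, while the newer code also accounts for resource requests via `fitsRequest`. A small sketch of the reduced check, reconstructed from the hunk; the helper name `podLikelyFits` is made up and the sketch is not the repository's own function:

```go
package example

import (
	v1 "k8s.io/api/core/v1"

	"sigs.k8s.io/descheduler/pkg/utils"
)

// podLikelyFits is an illustrative reconstruction of the release-branch
// criteria: node selector match, tolerated NoSchedule/NoExecute taints, and a
// schedulable node. It deliberately omits the resource-request accounting
// added by the newer fitsRequest/nodeAvailableResources code.
func podLikelyFits(pod *v1.Pod, node *v1.Node) bool {
	if ok, err := utils.PodMatchNodeSelector(pod, node); err != nil || !ok {
		return false
	}
	tolerated := utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, func(taint *v1.Taint) bool {
		return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
	})
	if !tolerated {
		return false
	}
	return !node.Spec.Unschedulable
}
```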
@@ -21,12 +21,9 @@ import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/test"
|
||||
)
|
||||
|
||||
@@ -150,13 +147,13 @@ func TestPodFitsCurrentNode(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
@@ -184,48 +181,27 @@ func TestPodFitsCurrentNode(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
node: test.BuildTestNode("node1", 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
node: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
var objs []runtime.Object
|
||||
objs = append(objs, tc.node)
|
||||
objs = append(objs, tc.pod)
|
||||
|
||||
fakeClient := fake.NewSimpleClientset(objs...)
|
||||
|
||||
sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
|
||||
podInformer := sharedInformerFactory.Core().V1().Pods()
|
||||
|
||||
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
|
||||
if err != nil {
|
||||
t.Errorf("Build get pods assigned to node function error: %v", err)
|
||||
}
|
||||
|
||||
sharedInformerFactory.Start(ctx.Done())
|
||||
sharedInformerFactory.WaitForCacheSync(ctx.Done())
|
||||
|
||||
actual := PodFitsCurrentNode(getPodsAssignedToNode, tc.pod, tc.node)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
})
|
||||
actual := PodFitsCurrentNode(tc.pod, tc.node)
|
||||
if actual != tc.success {
|
||||
t.Errorf("Test %#v failed", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
|
||||
nodeLabelKey := "kubernetes.io/desiredNode"
|
||||
nodeLabelValue := "yes"
|
||||
nodeTaintKey := "hardware"
|
||||
@@ -239,527 +215,238 @@ func TestPodFitsAnyOtherNode(t *testing.T) {
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
success bool
|
||||
podsOnNodes []*v1.Pod
|
||||
}{
|
||||
{
|
||||
description: "Pod fits another node matching node affinity",
|
||||
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{},
|
||||
success: true,
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit one of the nodes",
|
||||
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{},
|
||||
success: true,
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Pod expected to fit none of the nodes",
|
||||
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: "unfit1",
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: "unfit2",
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "unfit2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{},
|
||||
success: false,
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Nodes are unschedulable but labels match, should fail",
|
||||
pod: test.BuildTestPod("p1", 0, 0, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
}),
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Unschedulable = true
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Unschedulable: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: "no",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{},
success: false,
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, should pass",
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Both nodes are tained, should fail",
|
||||
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
pod: createPodManifest(nodeNames[2], nodeLabelKey, nodeLabelValue),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, there is a pod on the available node, and requests are low, should pass",
|
||||
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[1], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("test-pod", 12*1000, 20*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[0],
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(40*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, but CPU requests are too big, should fail",
|
||||
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
// Notice that this node only has 4 cores, the pod already on the node below requests 3 cores, and the pod above requests 2 cores
|
||||
test.BuildTestNode(nodeNames[1], 4000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 3000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[1],
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, but memory requests are too big, should fail",
|
||||
pod: test.BuildTestPod("p1", 2000, 5*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
Spec: v1.NodeSpec{
|
||||
Taints: []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoExecute,
|
||||
},
|
||||
},
|
||||
}
|
||||
}),
|
||||
// Notice that this node only has 8GB of memory, the pod already on the node below requests 4GB, and the pod above requests 5GB
|
||||
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("4GB-mem-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, but ephemeral storage requests are too big, should fail",
|
||||
pod: test.BuildTestPod("p1", 2000, 4*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
// Notice that this node only has 20GB of storage, the pod already on the node below requests 11GB, and the pod above requests 10GB
|
||||
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(20*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("11GB-storage-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(11*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, but custom resource requests are too big, should fail",
|
||||
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
|
||||
}),
|
||||
// Notice that this node only has 15 of the custom resource, the pod already on the node below requests 10, and the pod above requests 10
|
||||
test.BuildTestNode(nodeNames[1], 10*1000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
node.Status.Allocatable["example.com/custom-resource"] = *resource.NewQuantity(15, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("10-custom-resource-pod", 0, 0, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests["example.com/custom-resource"] = *resource.NewQuantity(10, resource.DecimalSI)
|
||||
}),
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, CPU requests will fit, and pod Overhead is low enough, should pass",
|
||||
pod: test.BuildTestPod("p1", 1000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
// Notice that this node has 5 CPU cores, the pod below requests 2 cores and has a CPU overhead of 1 core, and the pod above requests 1 core
|
||||
test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(1000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
success: true,
|
||||
},
|
||||
{
|
||||
description: "Two nodes matches node selector, one of them is tained, CPU requests will fit, but pod Overhead is too high, should fail",
|
||||
pod: test.BuildTestPod("p1", 2000, 2*1000*1000*1000, nodeNames[2], func(pod *v1.Pod) {
|
||||
pod.Spec.NodeSelector = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(nodeNames[0], 64000, 128*1000*1000*1000, 200, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(1000*1000*1000*1000, resource.DecimalSI)
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: nodeTaintKey,
|
||||
Value: nodeTaintValue,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
// Notice that this node only has 5 CPU cores, the pod below requests 2 cores, but has CPU overhead of 2 cores, and the pod above requests 2 cores
|
||||
test.BuildTestNode(nodeNames[1], 5000, 8*1000*1000*1000, 12, func(node *v1.Node) {
|
||||
node.ObjectMeta.Labels = map[string]string{
|
||||
nodeLabelKey: nodeLabelValue,
|
||||
}
|
||||
|
||||
node.Status.Allocatable[v1.ResourceEphemeralStorage] = *resource.NewQuantity(200*1000*1000*1000, resource.DecimalSI)
|
||||
}),
|
||||
test.BuildTestNode(nodeNames[2], 0, 0, 0, nil),
|
||||
},
|
||||
podsOnNodes: []*v1.Pod{
|
||||
test.BuildTestPod("3-core-pod", 2000, 4*1000*1000*1000, nodeNames[1], func(pod *v1.Pod) {
|
||||
pod.ObjectMeta = metav1.ObjectMeta{
|
||||
Namespace: "test",
|
||||
Labels: map[string]string{
|
||||
"test": "true",
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[0].Resources.Requests[v1.ResourceEphemeralStorage] = *resource.NewQuantity(10*1000*1000*1000, resource.DecimalSI)
|
||||
pod.Spec.Overhead = createResourceList(2000, 1000*1000*1000, 1000*1000*1000)
|
||||
}),
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nodeNames[2],
|
||||
},
|
||||
},
|
||||
},
|
||||
success: false,
|
||||
},
|
||||
}

for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var objs []runtime.Object
for _, node := range tc.nodes {
objs = append(objs, node)
}
for _, pod := range tc.podsOnNodes {
objs = append(objs, pod)
}
objs = append(objs, tc.pod)

fakeClient := fake.NewSimpleClientset(objs...)

sharedInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0)
podInformer := sharedInformerFactory.Core().V1().Pods()

getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("Build get pods assigned to node function error: %v", err)
}

sharedInformerFactory.Start(ctx.Done())
sharedInformerFactory.WaitForCacheSync(ctx.Done())

actual := PodFitsAnyOtherNode(getPodsAssignedToNode, tc.pod, tc.nodes)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
})
actual := PodFitsAnyOtherNode(tc.pod, tc.nodes)
if actual != tc.success {
t.Errorf("Test %#v failed", tc.description)
}
}
}

// createResourceList builds a small resource list of core resources
func createResourceList(cpu int64, memory int64, ephemeralStorage int64) v1.ResourceList {
resourceList := make(map[v1.ResourceName]resource.Quantity)
resourceList[v1.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
resourceList[v1.ResourceMemory] = *resource.NewQuantity(memory, resource.DecimalSI)
resourceList[v1.ResourceEphemeralStorage] = *resource.NewQuantity(ephemeralStorage, resource.DecimalSI)
return resourceList
}
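
// For example, createResourceList(1000, 1000*1000*1000, 1000*1000*1000), as used for the pod
// Overhead in the cases above, yields 1 CPU core (1000m), 1GB of memory and 1GB of ephemeral storage.
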
func createPodManifest(nodeName string, nodeSelectorKey string, nodeSelectorValue string) *v1.Pod {
return (&v1.Pod{
Spec: v1.PodSpec{
NodeName: nodeName,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeSelectorKey,
Operator: "In",
Values: []string{
nodeSelectorValue,
},
},
},
},
},
},
},
},
},
})
}
|
||||
|
||||
@@ -67,7 +67,6 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
node6 := test.BuildTestNode("n6", 200, 200, 10, nil)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p1.Namespace = "dev"
|
||||
@@ -103,14 +102,6 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
p18 := test.BuildTestPod("TARGET", 100, 0, node1.Name, nil)
|
||||
p18.Namespace = "node-fit"
|
||||
|
||||
// This pod sits on node6 and is used to take up CPU requests on the node
|
||||
p19 := test.BuildTestPod("CPU-eater", 150, 150, node6.Name, nil)
|
||||
p19.Namespace = "test"
|
||||
|
||||
// Dummy pod for node6 used to do the opposite of p19
|
||||
p20 := test.BuildTestPod("CPU-saver", 100, 150, node6.Name, nil)
|
||||
p20.Namespace = "test"
|
||||
|
||||
// ### Evictable Pods ###
|
||||
|
||||
// Three Pods in the "default" Namespace, bound to same ReplicaSet. 2 should be evicted.
|
||||
@@ -272,20 +263,6 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
|
||||
},
|
||||
{
|
||||
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available does not have enough CPU, and nodeFit set to true. 0 should be evicted.",
|
||||
pods: []*v1.Pod{p1, p2, p3, p19},
|
||||
nodes: []*v1.Node{node1, node6},
|
||||
expectedEvictedPodCount: 0,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
|
||||
},
|
||||
{
|
||||
description: "Three pods in the `node-fit` Namespace, bound to same ReplicaSet. Only node available has enough CPU, and nodeFit set to true. 1 should be evicted.",
|
||||
pods: []*v1.Pod{p1, p2, p3, p20},
|
||||
nodes: []*v1.Node{node1, node6},
|
||||
expectedEvictedPodCount: 1,
|
||||
strategy: api.DeschedulerStrategy{Params: &api.StrategyParameters{NodeFit: true}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -320,7 +297,6 @@ func TestFindDuplicatePods(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
testCase.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
@@ -378,7 +354,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
node.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
Key: "node-role.kubernetes.io/control-plane",
|
||||
Key: "node-role.kubernetes.io/master",
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -387,7 +363,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
if node.ObjectMeta.Labels == nil {
|
||||
node.ObjectMeta.Labels = map[string]string{}
|
||||
}
|
||||
node.ObjectMeta.Labels["node-role.kubernetes.io/control-plane"] = ""
|
||||
node.ObjectMeta.Labels["node-role.kubernetes.io/master"] = ""
|
||||
}
|
||||
|
||||
setWorkerLabel := func(node *v1.Node) {
|
||||
@@ -407,7 +383,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "node-role.kubernetes.io/control-plane",
|
||||
Key: "node-role.kubernetes.io/master",
|
||||
Operator: v1.NodeSelectorOpDoesNotExist,
|
||||
},
|
||||
{
|
||||
@@ -431,7 +407,7 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: "node-role.kubernetes.io/control-plane",
|
||||
Key: "node-role.kubernetes.io/master",
|
||||
Operator: v1.NodeSelectorOpDoesNotExist,
|
||||
},
|
||||
{
|
||||
@@ -748,7 +724,6 @@ func TestRemoveDuplicatesUniformly(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
testCase.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -166,12 +166,9 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
{
|
||||
description: "nodeFit=true, 1 unschedulable node, 1 container terminated with reason NodeAffinity, 0 eviction",
|
||||
strategy: createStrategy(true, false, nil, nil, nil, true),
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("node1", 2000, 3000, 10, nil),
|
||||
test.BuildTestNode("node2", 2000, 2000, 10, func(node *v1.Node) {
|
||||
node.Spec.Unschedulable = true
|
||||
}),
|
||||
},
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 2000, 3000, 10, func(node *v1.Node) {
|
||||
node.Spec.Unschedulable = true
|
||||
})},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
@@ -179,17 +176,6 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "nodeFit=true, only available node does not have enough resources, 1 container terminated with reason CreateContainerConfigError, 0 eviction",
|
||||
strategy: createStrategy(true, false, []string{"CreateContainerConfigError"}, nil, nil, true),
|
||||
nodes: []*v1.Node{test.BuildTestNode("node1", 1, 1, 10, nil), test.BuildTestNode("node2", 0, 0, 10, nil)},
|
||||
expectedEvictedPodCount: 0,
|
||||
pods: []*v1.Pod{
|
||||
buildTestPod("p1", "node1", newPodStatus("", "", nil, &v1.ContainerState{
|
||||
Terminated: &v1.ContainerStateTerminated{Reason: "CreateContainerConfigError"},
|
||||
}), nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
description: "excluded owner kind=ReplicaSet, 1 init container terminated with owner kind=ReplicaSet, 0 eviction",
|
||||
strategy: createStrategy(true, true, nil, []string{"ReplicaSet"}, nil, false),
|
||||
@@ -275,7 +261,6 @@ func TestRemoveFailedPods(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
tc.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -95,8 +95,8 @@ func RemovePodsViolatingNodeAffinity(ctx context.Context, client clientset.Inter
|
||||
getPodsAssignedToNode,
|
||||
podutil.WrapFilterFuncs(podFilter, func(pod *v1.Pod) bool {
|
||||
return evictable.IsEvictable(pod) &&
|
||||
!nodeutil.PodFitsCurrentNode(getPodsAssignedToNode, pod, node) &&
|
||||
nodeutil.PodFitsAnyNode(getPodsAssignedToNode, pod, nodes)
|
||||
!nodeutil.PodFitsCurrentNode(pod, node) &&
|
||||
nodeutil.PodFitsAnyNode(pod, nodes)
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
|
||||
@@ -222,7 +222,6 @@ func TestRemovePodsViolatingNodeAffinity(t *testing.T) {
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.maxNoOfPodsToEvictPerNamespace,
|
||||
tc.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -55,16 +55,13 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
|
||||
return
|
||||
}
|
||||
|
||||
var includedNamespaces, excludedNamespaces, excludedTaints sets.String
|
||||
var includedNamespaces, excludedNamespaces sets.String
|
||||
var labelSelector *metav1.LabelSelector
|
||||
if strategy.Params != nil {
|
||||
if strategy.Params.Namespaces != nil {
|
||||
includedNamespaces = sets.NewString(strategy.Params.Namespaces.Include...)
|
||||
excludedNamespaces = sets.NewString(strategy.Params.Namespaces.Exclude...)
|
||||
}
|
||||
if strategy.Params.ExcludedTaints != nil {
|
||||
excludedTaints = sets.NewString(strategy.Params.ExcludedTaints...)
|
||||
}
|
||||
labelSelector = strategy.Params.LabelSelector
|
||||
}
|
||||
|
||||
@@ -92,18 +89,6 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
|
||||
return
|
||||
}
|
||||
|
||||
excludeTaint := func(taint *v1.Taint) bool {
|
||||
// Exclude taints by key *or* key=value
|
||||
return excludedTaints.Has(taint.Key) || (taint.Value != "" && excludedTaints.Has(fmt.Sprintf("%s=%s", taint.Key, taint.Value)))
|
||||
}
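
// For example, with excludedTaints = ["hardware", "dedicated=special-user"], a taint with key
// "hardware" is excluded whatever its value, while a taint with key "dedicated" is excluded only
// when its value is "special-user" (hypothetical taint names).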
|
||||
|
||||
taintFilterFnc := func(taint *v1.Taint) bool { return (taint.Effect == v1.TaintEffectNoSchedule) && !excludeTaint(taint) }
|
||||
if strategy.Params != nil && strategy.Params.IncludePreferNoSchedule {
|
||||
taintFilterFnc = func(taint *v1.Taint) bool {
|
||||
return (taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectPreferNoSchedule) && !excludeTaint(taint)
|
||||
}
|
||||
}
|
||||
|
||||
for _, node := range nodes {
|
||||
klog.V(1).InfoS("Processing node", "node", klog.KObj(node))
|
||||
pods, err := podutil.ListAllPodsOnANode(node.Name, getPodsAssignedToNode, podFilter)
|
||||
@@ -116,7 +101,7 @@ func RemovePodsViolatingNodeTaints(ctx context.Context, client clientset.Interfa
|
||||
if !utils.TolerationsTolerateTaintsWithFilter(
|
||||
pods[i].Spec.Tolerations,
|
||||
node.Spec.Taints,
|
||||
taintFilterFnc,
|
||||
func(taint *v1.Taint) bool { return taint.Effect == v1.TaintEffectNoSchedule },
|
||||
) {
|
||||
klog.V(2).InfoS("Not all taints with NoSchedule effect are tolerated after update for pod on node", "pod", klog.KObj(pods[i]), "node", klog.KObj(node))
|
||||
if _, err := podEvictor.EvictPod(ctx, pods[i], node, "NodeTaint"); err != nil {
|
||||
|
||||
@@ -27,14 +27,6 @@ func createNoScheduleTaint(key, value string, index int) v1.Taint {
|
||||
}
|
||||
}
|
||||
|
||||
func createPreferNoScheduleTaint(key, value string, index int) v1.Taint {
|
||||
return v1.Taint{
|
||||
Key: "testTaint" + fmt.Sprintf("%v", index),
|
||||
Value: "test" + fmt.Sprintf("%v", index),
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}
|
||||
}
|
||||
|
||||
func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
|
||||
taints := []v1.Taint{}
|
||||
for _, index := range indices {
|
||||
@@ -44,12 +36,12 @@ func addTaintsToNode(node *v1.Node, key, value string, indices []int) *v1.Node {
|
||||
return node
|
||||
}
|
||||
|
||||
func addTolerationToPod(pod *v1.Pod, key, value string, index int, effect v1.TaintEffect) *v1.Pod {
|
||||
func addTolerationToPod(pod *v1.Pod, key, value string, index int) *v1.Pod {
|
||||
if pod.Annotations == nil {
|
||||
pod.Annotations = map[string]string{}
|
||||
}
|
||||
|
||||
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: effect}}
|
||||
pod.Spec.Tolerations = []v1.Toleration{{Key: key + fmt.Sprintf("%v", index), Value: value + fmt.Sprintf("%v", index), Effect: v1.TaintEffectNoSchedule}}
|
||||
|
||||
return pod
|
||||
}
|
||||
@@ -71,16 +63,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
node5 := test.BuildTestNode("n5", 2000, 3000, 10, nil)
|
||||
node5.Spec.Taints = []v1.Taint{
|
||||
createPreferNoScheduleTaint("testTaint", "test", 1),
|
||||
}
|
||||
|
||||
node6 := test.BuildTestNode("n6", 1, 1, 1, nil)
|
||||
node6.Spec.Taints = []v1.Taint{
|
||||
createPreferNoScheduleTaint("testTaint", "test", 1),
|
||||
}
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
p3 := test.BuildTestPod("p3", 100, 0, node1.Name, nil)
|
||||
@@ -127,20 +109,14 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
// A Mirror Pod.
|
||||
p10.Annotations = test.GetMirrorPodAnnotation()
|
||||
|
||||
p1 = addTolerationToPod(p1, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
p3 = addTolerationToPod(p3, "testTaint", "test", 1, v1.TaintEffectNoSchedule)
|
||||
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1, v1.TaintEffectNoSchedule)
|
||||
p1 = addTolerationToPod(p1, "testTaint", "test", 1)
|
||||
p3 = addTolerationToPod(p3, "testTaint", "test", 1)
|
||||
p4 = addTolerationToPod(p4, "testTaintX", "testX", 1)
|
||||
|
||||
p12.Spec.NodeSelector = map[string]string{
|
||||
"datacenter": "west",
|
||||
}
|
||||
|
||||
p13 := test.BuildTestPod("p13", 100, 0, node5.Name, nil)
|
||||
p13.ObjectMeta.OwnerReferences = test.GetNormalPodOwnerRefList()
|
||||
// node5 has PreferNoSchedule:testTaint1=test1, so p13 has to have
// PreferNoSchedule:testTaint0=test0 so that the taint is not tolerated
|
||||
p13 = addTolerationToPod(p13, "testTaint", "test", 0, v1.TaintEffectPreferNoSchedule)
|
||||
|
||||
var uint1 uint = 1
|
||||
|
||||
tests := []struct {
|
||||
@@ -153,8 +129,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
maxNoOfPodsToEvictPerNamespace *uint
|
||||
expectedEvictedPodCount uint
|
||||
nodeFit bool
|
||||
includePreferNoSchedule bool
|
||||
excludedTaints []string
|
||||
}{
|
||||
|
||||
{
|
||||
@@ -250,59 +224,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
expectedEvictedPodCount: 0, // p2 cannot be evicted because of nodeFit
|
||||
nodeFit: true,
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating PreferNoSchedule node taint should not be evicted when not enabled",
|
||||
pods: []*v1.Pod{p13},
|
||||
nodes: []*v1.Node{node5},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating PreferNoSchedule node taint should be evicted when enabled",
|
||||
pods: []*v1.Pod{p13},
|
||||
nodes: []*v1.Node{node5},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
includePreferNoSchedule: true,
|
||||
expectedEvictedPodCount: 1, // p13 gets evicted
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating excluded node taints (by key) should not be evicted",
|
||||
pods: []*v1.Pod{p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
excludedTaints: []string{"excludedTaint1", "testTaint1"},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted, as one of the specified excludedTaints matches the key of node1's taint
|
||||
},
|
||||
{
|
||||
description: "Pods not tolerating excluded node taints (by key and value) should not be evicted",
|
||||
pods: []*v1.Pod{p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
excludedTaints: []string{"testTaint1=test1"},
|
||||
expectedEvictedPodCount: 0, // nothing gets evicted, as both the key and value of the excluded taint match node1's taint
|
||||
},
|
||||
{
|
||||
description: "The excluded taint matches the key of node1's taint, but does not match the value",
|
||||
pods: []*v1.Pod{p2},
|
||||
nodes: []*v1.Node{node1},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: false,
|
||||
excludedTaints: []string{"testTaint1=test2"},
|
||||
expectedEvictedPodCount: 1, // pod gets evicted, as excluded taint value does not match node1's taint value
|
||||
},
|
||||
{
|
||||
description: "Critical and non critical pods, pods not tolerating node taint can't be evicted because the only available node does not have enough resources.",
|
||||
pods: []*v1.Pod{p2, p7, p9, p10},
|
||||
nodes: []*v1.Node{node1, node6},
|
||||
evictLocalStoragePods: false,
|
||||
evictSystemCriticalPods: true,
|
||||
expectedEvictedPodCount: 0, //p2 and p7 can't be evicted
|
||||
nodeFit: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -338,7 +259,6 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.maxNoOfPodsToEvictPerNamespace,
|
||||
tc.nodes,
|
||||
getPodsAssignedToNode,
|
||||
tc.evictLocalStoragePods,
|
||||
tc.evictSystemCriticalPods,
|
||||
false,
|
||||
@@ -348,9 +268,7 @@ func TestDeletePodsViolatingNodeTaints(t *testing.T) {
|
||||
|
||||
strategy := api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: tc.nodeFit,
|
||||
IncludePreferNoSchedule: tc.includePreferNoSchedule,
|
||||
ExcludedTaints: tc.excludedTaints,
|
||||
NodeFit: tc.nodeFit,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -63,17 +63,16 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
|
||||
resourceNames := getResourceNames(targetThresholds)
|
||||
|
||||
sourceNodes, highNodes := classifyNodes(
|
||||
getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
|
||||
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, false),
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
return isNodeWithLowUtilization(usage)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable", "node", klog.KObj(node))
|
||||
return false
|
||||
}
|
||||
return !isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
return !isNodeWithLowUtilization(usage)
|
||||
})
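
// The first filter above selects under-utilized nodes as eviction sources (so their pods can be
// compacted elsewhere), while the second selects schedulable nodes that are not under-utilized
// as the remaining (high) nodes.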
|
||||
|
||||
// log message in one line
|
||||
@@ -83,7 +82,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
|
||||
"Pods", thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
|
||||
}
|
||||
}
|
||||
@@ -111,7 +110,7 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
|
||||
|
||||
// stop if the total available usage has dropped to zero - no more pods can be scheduled
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
continueEvictionCond := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
for name := range totalAvailableUsage {
|
||||
if totalAvailableUsage[name].CmpInt64(0) < 1 {
|
||||
return false
|
||||
@@ -120,10 +119,6 @@ func HighNodeUtilization(ctx context.Context, client clientset.Interface, strate
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Sort the nodes by the usage in ascending order
|
||||
sortNodesByUsage(sourceNodes, true)
|
||||
|
||||
evictPodsFromSourceNodes(
|
||||
ctx,
|
||||
sourceNodes,
|
||||
@@ -164,7 +159,7 @@ func setDefaultForThresholds(thresholds, targetThresholds api.ResourceThresholds
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
targetThresholds[name] = MaxResourcePercentage
|
||||
}
|
||||
}
|
||||
|
||||
@@ -385,50 +385,6 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "Other node does not have enough Memory",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 200, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 50, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 50, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 50, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 50, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 100, n2NodeName, func(pod *v1.Pod) {
|
||||
// A pod requesting more memory than is available on node1
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
{
|
||||
name: "Other node does not have enough Memory",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 30,
|
||||
v1.ResourcePods: 30,
|
||||
},
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 200, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 50, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 50, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 50, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 50, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 100, n2NodeName, func(pod *v1.Pod) {
|
||||
// A pod requesting more memory than is available on node1
|
||||
test.SetRSOwnerRef(pod)
|
||||
}),
|
||||
},
|
||||
expectedPodsEvicted: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
@@ -507,7 +463,6 @@ func TestHighNodeUtilization(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
testCase.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
@@ -713,7 +668,6 @@ func TestHighNodeUtilizationWithTaints(t *testing.T) {
|
||||
&item.evictionsExpected,
|
||||
nil,
|
||||
item.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -50,57 +50,40 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
if strategy.Params != nil {
|
||||
nodeFit = strategy.Params.NodeFit
|
||||
}
|
||||
useDeviationThresholds := strategy.Params.NodeResourceUtilizationThresholds.UseDeviationThresholds
|
||||
|
||||
thresholds := strategy.Params.NodeResourceUtilizationThresholds.Thresholds
|
||||
targetThresholds := strategy.Params.NodeResourceUtilizationThresholds.TargetThresholds
|
||||
if err := validateLowUtilizationStrategyConfig(thresholds, targetThresholds, useDeviationThresholds); err != nil {
|
||||
if err := validateLowUtilizationStrategyConfig(thresholds, targetThresholds); err != nil {
|
||||
klog.ErrorS(err, "LowNodeUtilization config is not valid")
|
||||
return
|
||||
}
|
||||
|
||||
// check if Pods/CPU/Mem are set, if not, set them to 100
|
||||
if _, ok := thresholds[v1.ResourcePods]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourcePods] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
thresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourcePods] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceCPU]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourceCPU] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
thresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceCPU] = MaxResourcePercentage
|
||||
}
|
||||
if _, ok := thresholds[v1.ResourceMemory]; !ok {
|
||||
if useDeviationThresholds {
|
||||
thresholds[v1.ResourceMemory] = MinResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MinResourcePercentage
|
||||
} else {
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
|
||||
thresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
targetThresholds[v1.ResourceMemory] = MaxResourcePercentage
|
||||
}
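
// Illustrative reading of the defaults above: a policy that only sets CPU thresholds ends up with
// Pods and Memory thresholds of 100%, so those resources never mark a node as over-utilized and
// never prevent it from being classified as under-utilized; with useDeviationThresholds the
// unspecified resources are pinned to full node capacity in getNodeThresholds, which has the same effect.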
|
||||
resourceNames := getResourceNames(thresholds)
|
||||
|
||||
lowNodes, sourceNodes := classifyNodes(
|
||||
getNodeUsage(nodes, resourceNames, getPodsAssignedToNode),
|
||||
getNodeThresholds(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode, useDeviationThresholds),
|
||||
getNodeUsage(nodes, thresholds, targetThresholds, resourceNames, getPodsAssignedToNode),
|
||||
// The node has to be schedulable (to be able to move workload there)
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
if nodeutil.IsNodeUnschedulable(node) {
|
||||
klog.V(2).InfoS("Node is unschedulable, thus not considered as underutilized", "node", klog.KObj(node))
|
||||
return false
|
||||
}
|
||||
return isNodeWithLowUtilization(usage, threshold.lowResourceThreshold)
|
||||
return isNodeWithLowUtilization(usage)
|
||||
},
|
||||
func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool {
|
||||
return isNodeAboveTargetUtilization(usage, threshold.highResourceThreshold)
|
||||
func(node *v1.Node, usage NodeUsage) bool {
|
||||
return isNodeAboveTargetUtilization(usage)
|
||||
},
|
||||
)
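
// Here the roles are reversed: schedulable nodes below the low thresholds become lowNodes (the
// candidate destinations), while nodes above the target thresholds become sourceNodes, whose pods
// are considered for eviction.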
|
||||
|
||||
@@ -111,7 +94,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
"Pods", thresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range thresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), int64(thresholds[name]))
|
||||
}
|
||||
}
|
||||
@@ -125,7 +108,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
"Pods", targetThresholds[v1.ResourcePods],
|
||||
}
|
||||
for name := range targetThresholds {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), int64(targetThresholds[name]))
|
||||
}
|
||||
}
|
||||
@@ -155,8 +138,8 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
evictable := podEvictor.Evictable(evictions.WithPriorityThreshold(thresholdPriority), evictions.WithNodeFit(nodeFit))
|
||||
|
||||
// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
|
||||
continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
|
||||
continueEvictionCond := func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
|
||||
if !isNodeAboveTargetUtilization(nodeUsage) {
|
||||
return false
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
@@ -168,9 +151,6 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
return true
|
||||
}
|
||||
|
||||
// Sort the nodes by the usage in descending order
|
||||
sortNodesByUsage(sourceNodes, false)
|
||||
|
||||
evictPodsFromSourceNodes(
|
||||
ctx,
|
||||
sourceNodes,
|
||||
@@ -185,7 +165,7 @@ func LowNodeUtilization(ctx context.Context, client clientset.Interface, strateg
|
||||
}
|
||||
|
||||
// validateLowUtilizationStrategyConfig checks if the strategy's config is valid
|
||||
func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds, useDeviationThresholds bool) error {
|
||||
func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.ResourceThresholds) error {
|
||||
// validate thresholds and targetThresholds config
|
||||
if err := validateThresholds(thresholds); err != nil {
|
||||
return fmt.Errorf("thresholds config is not valid: %v", err)
|
||||
@@ -201,7 +181,7 @@ func validateLowUtilizationStrategyConfig(thresholds, targetThresholds api.Resou
|
||||
for resourceName, value := range thresholds {
|
||||
if targetValue, ok := targetThresholds[resourceName]; !ok {
|
||||
return fmt.Errorf("thresholds and targetThresholds configured different resources")
|
||||
} else if value > targetValue && !useDeviationThresholds {
|
||||
} else if value > targetValue {
|
||||
return fmt.Errorf("thresholds' %v percentage is greater than targetThresholds'", resourceName)
|
||||
}
|
||||
}
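
// For example, thresholds{cpu: 40} combined with targetThresholds{cpu: 20} fails the check above
// unless useDeviationThresholds is set, since deviation thresholds are offsets from the cluster
// average rather than absolute bounds (illustrative values).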
|
||||
|
||||
@@ -48,7 +48,6 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
useDeviationThresholds bool
|
||||
thresholds, targetThresholds api.ResourceThresholds
|
||||
nodes []*v1.Node
|
||||
pods []*v1.Pod
|
||||
@@ -644,57 +643,6 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
},
|
||||
expectedPodsEvicted: 3,
|
||||
},
|
||||
{
|
||||
name: "deviation thresholds",
|
||||
thresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 5,
|
||||
v1.ResourcePods: 5,
|
||||
},
|
||||
targetThresholds: api.ResourceThresholds{
|
||||
v1.ResourceCPU: 5,
|
||||
v1.ResourcePods: 5,
|
||||
},
|
||||
useDeviationThresholds: true,
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode(n1NodeName, 4000, 3000, 9, nil),
|
||||
test.BuildTestNode(n2NodeName, 4000, 3000, 10, nil),
|
||||
test.BuildTestNode(n3NodeName, 4000, 3000, 10, test.SetNodeUnschedulable),
|
||||
},
|
||||
pods: []*v1.Pod{
|
||||
test.BuildTestPod("p1", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p2", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p3", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p4", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
test.BuildTestPod("p5", 400, 0, n1NodeName, test.SetRSOwnerRef),
|
||||
// These won't be evicted.
|
||||
test.BuildTestPod("p6", 400, 0, n1NodeName, test.SetDSOwnerRef),
|
||||
test.BuildTestPod("p7", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A pod with local storage.
|
||||
test.SetNormalOwnerRef(pod)
|
||||
pod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "somePath"},
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
// A Mirror Pod.
|
||||
pod.Annotations = test.GetMirrorPodAnnotation()
|
||||
}),
|
||||
test.BuildTestPod("p8", 400, 0, n1NodeName, func(pod *v1.Pod) {
|
||||
// A Critical Pod.
|
||||
pod.Namespace = "kube-system"
|
||||
priority := utils.SystemCriticalPriority
|
||||
pod.Spec.Priority = &priority
|
||||
}),
|
||||
test.BuildTestPod("p9", 400, 0, n2NodeName, test.SetRSOwnerRef),
|
||||
},
|
||||
expectedPodsEvicted: 2,
|
||||
evictedPods: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
@@ -772,7 +720,6 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
test.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
@@ -784,9 +731,8 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
Enabled: true,
|
||||
Params: &api.StrategyParameters{
|
||||
NodeResourceUtilizationThresholds: &api.NodeResourceUtilizationThresholds{
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
UseDeviationThresholds: test.useDeviationThresholds,
|
||||
Thresholds: test.thresholds,
|
||||
TargetThresholds: test.targetThresholds,
|
||||
},
|
||||
NodeFit: true,
|
||||
},
|
||||
@@ -944,7 +890,7 @@ func TestValidateLowNodeUtilizationStrategyConfig(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range tests {
|
||||
validateErr := validateLowUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds, false)
|
||||
validateErr := validateLowUtilizationStrategyConfig(testCase.thresholds, testCase.targetThresholds)
|
||||
|
||||
if validateErr == nil || testCase.errInfo == nil {
|
||||
if validateErr != testCase.errInfo {
|
||||
@@ -1087,7 +1033,6 @@ func TestLowNodeUtilizationWithTaints(t *testing.T) {
|
||||
&item.evictionsExpected,
|
||||
nil,
|
||||
item.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -27,8 +27,6 @@ import (
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
)
|
||||
@@ -38,19 +36,12 @@ type NodeUsage struct {
|
||||
node *v1.Node
|
||||
usage map[v1.ResourceName]*resource.Quantity
|
||||
allPods []*v1.Pod
|
||||
}
|
||||
|
||||
type NodeThresholds struct {
|
||||
lowResourceThreshold map[v1.ResourceName]*resource.Quantity
|
||||
highResourceThreshold map[v1.ResourceName]*resource.Quantity
|
||||
}
|
||||
|
||||
type NodeInfo struct {
|
||||
NodeUsage
|
||||
thresholds NodeThresholds
|
||||
}
|
||||
|
||||
type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
|
||||
type continueEvictionCond func(nodeUsage NodeUsage, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
|
||||
|
||||
// NodePodsMap is a set of (node, pods) pairs
|
||||
type NodePodsMap map[*v1.Node][]*v1.Pod
|
||||
@@ -75,7 +66,7 @@ func validateNodeUtilizationParams(params *api.StrategyParameters) error {
|
||||
|
||||
// validateThresholds checks if thresholds have valid resource name and resource percentage configured
|
||||
func validateThresholds(thresholds api.ResourceThresholds) error {
|
||||
if len(thresholds) == 0 {
|
||||
if thresholds == nil || len(thresholds) == 0 {
|
||||
return fmt.Errorf("no resource threshold is configured")
|
||||
}
|
||||
for name, percent := range thresholds {
|
||||
@@ -86,63 +77,9 @@ func validateThresholds(thresholds api.ResourceThresholds) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func normalizePercentage(percent api.Percentage) api.Percentage {
|
||||
if percent > MaxResourcePercentage {
|
||||
return MaxResourcePercentage
|
||||
}
|
||||
if percent < MinResourcePercentage {
|
||||
return MinResourcePercentage
|
||||
}
|
||||
return percent
|
||||
}
|
||||
|
||||
func getNodeThresholds(
|
||||
nodes []*v1.Node,
|
||||
lowThreshold, highThreshold api.ResourceThresholds,
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
useDeviationThresholds bool,
|
||||
) map[string]NodeThresholds {
|
||||
nodeThresholdsMap := map[string]NodeThresholds{}
|
||||
|
||||
averageResourceUsagePercent := api.ResourceThresholds{}
|
||||
if useDeviationThresholds {
|
||||
averageResourceUsagePercent = averageNodeBasicresources(nodes, getPodsAssignedToNode, resourceNames)
|
||||
}
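
// When useDeviationThresholds is set, the per-node thresholds computed below are offsets from this
// cluster-wide average rather than absolute percentages: e.g. an average CPU utilization of 50%
// combined with a threshold of 10 yields a low threshold at 40% and a high threshold at 60% of the
// node's capacity (illustrative numbers).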
|
||||
|
||||
for _, node := range nodes {
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
}
|
||||
|
||||
nodeThresholdsMap[node.Name] = NodeThresholds{
|
||||
lowResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
|
||||
highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
|
||||
}
|
||||
|
||||
for _, resourceName := range resourceNames {
|
||||
if useDeviationThresholds {
|
||||
cap := nodeCapacity[resourceName]
|
||||
if lowThreshold[resourceName] == MinResourcePercentage {
|
||||
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = &cap
|
||||
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = &cap
|
||||
} else {
|
||||
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]-lowThreshold[resourceName]))
|
||||
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, normalizePercentage(averageResourceUsagePercent[resourceName]+highThreshold[resourceName]))
|
||||
}
|
||||
} else {
|
||||
nodeThresholdsMap[node.Name].lowResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, lowThreshold[resourceName])
|
||||
nodeThresholdsMap[node.Name].highResourceThreshold[resourceName] = resourceThreshold(nodeCapacity, resourceName, highThreshold[resourceName])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nodeThresholdsMap
|
||||
}
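For readers following the deviation-threshold branch in getNodeThresholds above, here is a rough standalone sketch of the arithmetic, assuming MinResourcePercentage is 0 and MaxResourcePercentage is 100 (the constants' values are not shown in this diff): each node's low/high thresholds become the cluster-average usage minus/plus the configured deviation, clamped to the <0;100> interval.

```
// Illustrative only; not part of the descheduler code base.
package main

import "fmt"

// clamp mirrors what normalizePercentage is assumed to do: keep a percentage in <0;100>.
func clamp(p float64) float64 {
	if p > 100 {
		return 100
	}
	if p < 0 {
		return 0
	}
	return p
}

func main() {
	// Assumed example numbers: cluster-average CPU usage of 55%, configured deviations of 10.
	average, lowDeviation, highDeviation := 55.0, 10.0, 10.0
	fmt.Printf("low=%.0f%% high=%.0f%%\n", clamp(average-lowDeviation), clamp(average+highDeviation))
}
```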
func getNodeUsage(
|
||||
nodes []*v1.Node,
|
||||
lowThreshold, highThreshold api.ResourceThresholds,
|
||||
resourceNames []v1.ResourceName,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
) []NodeUsage {
|
||||
@@ -155,37 +92,48 @@ func getNodeUsage(
|
||||
continue
|
||||
}
|
||||
|
||||
// A threshold is in percentages but in <0;100> interval.
|
||||
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
|
||||
// Multiplying it with capacity will give fraction of the capacity corresponding to the given high/low resource threshold in Quantity units.
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
}
|
||||
lowResourceThreshold := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(lowThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(float64(lowThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
if !isBasicResource(name) {
|
||||
cap := nodeCapacity[name]
|
||||
lowResourceThreshold[name] = resource.NewQuantity(int64(float64(lowThreshold[name])*float64(cap.Value())*0.01), resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
highResourceThreshold := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(int64(float64(highThreshold[v1.ResourceCPU])*float64(nodeCapacity.Cpu().MilliValue())*0.01), resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(int64(float64(highThreshold[v1.ResourceMemory])*float64(nodeCapacity.Memory().Value())*0.01), resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(float64(highThreshold[v1.ResourcePods])*float64(nodeCapacity.Pods().Value())*0.01), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
if !isBasicResource(name) {
|
||||
cap := nodeCapacity[name]
|
||||
highResourceThreshold[name] = resource.NewQuantity(int64(float64(highThreshold[name])*float64(cap.Value())*0.01), resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
nodeUsageList = append(nodeUsageList, NodeUsage{
|
||||
node: node,
|
||||
usage: nodeutil.NodeUtilization(pods, resourceNames),
|
||||
allPods: pods,
|
||||
node: node,
|
||||
usage: nodeUtilization(node, pods, resourceNames),
|
||||
allPods: pods,
|
||||
lowResourceThreshold: lowResourceThreshold,
|
||||
highResourceThreshold: highResourceThreshold,
|
||||
})
|
||||
}
|
||||
|
||||
return nodeUsageList
|
||||
}
|
||||
|
||||
func resourceThreshold(nodeCapacity v1.ResourceList, resourceName v1.ResourceName, threshold api.Percentage) *resource.Quantity {
|
||||
defaultFormat := resource.DecimalSI
|
||||
if resourceName == v1.ResourceMemory {
|
||||
defaultFormat = resource.BinarySI
|
||||
}
|
||||
|
||||
resourceCapacityFraction := func(resourceNodeCapacity int64) int64 {
|
||||
// A threshold is in percentages but in <0;100> interval.
|
||||
// Performing `threshold * 0.01` will convert <0;100> interval into <0;1>.
|
||||
// Multiplying it with capacity will give fraction of the capacity corresponding to the given resource threshold in Quantity units.
|
||||
return int64(float64(threshold) * 0.01 * float64(resourceNodeCapacity))
|
||||
}
|
||||
|
||||
resourceCapacityQuantity := nodeCapacity.Name(resourceName, defaultFormat)
|
||||
|
||||
if resourceName == v1.ResourceCPU {
|
||||
return resource.NewMilliQuantity(resourceCapacityFraction(resourceCapacityQuantity.MilliValue()), defaultFormat)
|
||||
}
|
||||
return resource.NewQuantity(resourceCapacityFraction(resourceCapacityQuantity.Value()), defaultFormat)
|
||||
}
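As a quick illustration of the conversion the comments above describe (a sketch, not repository code): a threshold of 20 applied to 2000m of allocatable CPU yields 400m, and the same 20 applied to 8Gi of memory yields roughly 1.6Gi worth of bytes.

```
// Standalone sketch of the percentage-to-Quantity conversion; names are illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// fractionOfCapacity converts a <0;100> threshold into a Quantity that is the
// corresponding fraction of the given capacity. CPU is handled in milli-units.
func fractionOfCapacity(thresholdPercent float64, capacity *resource.Quantity, milli bool) *resource.Quantity {
	if milli {
		return resource.NewMilliQuantity(int64(thresholdPercent*0.01*float64(capacity.MilliValue())), resource.DecimalSI)
	}
	return resource.NewQuantity(int64(thresholdPercent*0.01*float64(capacity.Value())), resource.BinarySI)
}

func main() {
	cpu := resource.NewMilliQuantity(2000, resource.DecimalSI)       // 2 cores
	mem := resource.NewQuantity(8*1024*1024*1024, resource.BinarySI) // 8Gi

	fmt.Println(fractionOfCapacity(20, cpu, true))  // 400m
	fmt.Println(fractionOfCapacity(20, mem, false)) // 20% of 8Gi, in bytes
}
```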
func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
|
||||
nodeCapacity := nodeUsage.node.Status.Capacity
|
||||
if len(nodeUsage.node.Status.Allocatable) > 0 {
|
||||
@@ -207,24 +155,19 @@ func resourceUsagePercentages(nodeUsage NodeUsage) map[v1.ResourceName]float64 {
|
||||
// low and high thresholds, it is simply ignored.
|
||||
func classifyNodes(
|
||||
nodeUsages []NodeUsage,
|
||||
nodeThresholds map[string]NodeThresholds,
|
||||
lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage, threshold NodeThresholds) bool,
|
||||
) ([]NodeInfo, []NodeInfo) {
|
||||
lowNodes, highNodes := []NodeInfo{}, []NodeInfo{}
|
||||
lowThresholdFilter, highThresholdFilter func(node *v1.Node, usage NodeUsage) bool,
|
||||
) ([]NodeUsage, []NodeUsage) {
|
||||
lowNodes, highNodes := []NodeUsage{}, []NodeUsage{}
|
||||
|
||||
for _, nodeUsage := range nodeUsages {
|
||||
nodeInfo := NodeInfo{
|
||||
NodeUsage: nodeUsage,
|
||||
thresholds: nodeThresholds[nodeUsage.node.Name],
|
||||
}
|
||||
if lowThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
|
||||
klog.InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
lowNodes = append(lowNodes, nodeInfo)
|
||||
} else if highThresholdFilter(nodeUsage.node, nodeUsage, nodeThresholds[nodeUsage.node.Name]) {
|
||||
klog.InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
highNodes = append(highNodes, nodeInfo)
|
||||
if lowThresholdFilter(nodeUsage.node, nodeUsage) {
|
||||
klog.V(2).InfoS("Node is underutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
lowNodes = append(lowNodes, nodeUsage)
|
||||
} else if highThresholdFilter(nodeUsage.node, nodeUsage) {
|
||||
klog.V(2).InfoS("Node is overutilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
highNodes = append(highNodes, nodeUsage)
|
||||
} else {
|
||||
klog.InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
klog.V(2).InfoS("Node is appropriately utilized", "node", klog.KObj(nodeUsage.node), "usage", nodeUsage.usage, "usagePercentage", resourceUsagePercentages(nodeUsage))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,13 +179,16 @@ func classifyNodes(
|
||||
// TODO: @ravig Break this function into smaller functions.
|
||||
func evictPodsFromSourceNodes(
|
||||
ctx context.Context,
|
||||
sourceNodes, destinationNodes []NodeInfo,
|
||||
sourceNodes, destinationNodes []NodeUsage,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
podFilter func(pod *v1.Pod) bool,
|
||||
resourceNames []v1.ResourceName,
|
||||
strategy string,
|
||||
continueEviction continueEvictionCond,
|
||||
) {
|
||||
|
||||
sortNodesByUsage(sourceNodes)
|
||||
|
||||
// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
|
||||
totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourcePods: {},
|
||||
@@ -258,7 +204,7 @@ func evictPodsFromSourceNodes(
|
||||
if _, ok := totalAvailableUsage[name]; !ok {
|
||||
totalAvailableUsage[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
totalAvailableUsage[name].Add(*node.thresholds.highResourceThreshold[name])
|
||||
totalAvailableUsage[name].Add(*node.highResourceThreshold[name])
|
||||
totalAvailableUsage[name].Sub(*node.usage[name])
|
||||
}
|
||||
}
|
||||
@@ -270,7 +216,7 @@ func evictPodsFromSourceNodes(
|
||||
"Pods", totalAvailableUsage[v1.ResourcePods].Value(),
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
if !node.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
|
||||
}
|
||||
}
|
||||
@@ -298,7 +244,7 @@ func evictPodsFromSourceNodes(
|
||||
func evictPods(
|
||||
ctx context.Context,
|
||||
inputPods []*v1.Pod,
|
||||
nodeInfo NodeInfo,
|
||||
nodeUsage NodeUsage,
|
||||
totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
|
||||
taintsOfLowNodes map[string][]v1.Taint,
|
||||
podEvictor *evictions.PodEvictor,
|
||||
@@ -306,14 +252,14 @@ func evictPods(
|
||||
continueEviction continueEvictionCond,
|
||||
) {
|
||||
|
||||
if continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
if continueEviction(nodeUsage, totalAvailableUsage) {
|
||||
for _, pod := range inputPods {
|
||||
if !utils.PodToleratesTaints(pod, taintsOfLowNodes) {
|
||||
klog.V(3).InfoS("Skipping eviction for pod, doesn't tolerate node taint", "pod", klog.KObj(pod))
|
||||
continue
|
||||
}
|
||||
|
||||
success, err := podEvictor.EvictPod(ctx, pod, nodeInfo.node, strategy)
|
||||
success, err := podEvictor.EvictPod(ctx, pod, nodeUsage.node, strategy)
|
||||
if err != nil {
|
||||
klog.ErrorS(err, "Error evicting pod", "pod", klog.KObj(pod))
|
||||
break
|
||||
@@ -324,30 +270,30 @@ func evictPods(
|
||||
|
||||
for name := range totalAvailableUsage {
|
||||
if name == v1.ResourcePods {
|
||||
nodeInfo.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
nodeUsage.usage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
totalAvailableUsage[name].Sub(*resource.NewQuantity(1, resource.DecimalSI))
|
||||
} else {
|
||||
quantity := utils.GetResourceRequestQuantity(pod, name)
|
||||
nodeInfo.usage[name].Sub(quantity)
|
||||
nodeUsage.usage[name].Sub(quantity)
|
||||
totalAvailableUsage[name].Sub(quantity)
|
||||
}
|
||||
}
|
||||
|
||||
keysAndValues := []interface{}{
|
||||
"node", nodeInfo.node.Name,
|
||||
"CPU", nodeInfo.usage[v1.ResourceCPU].MilliValue(),
|
||||
"Mem", nodeInfo.usage[v1.ResourceMemory].Value(),
|
||||
"Pods", nodeInfo.usage[v1.ResourcePods].Value(),
|
||||
"node", nodeUsage.node.Name,
|
||||
"CPU", nodeUsage.usage[v1.ResourceCPU].MilliValue(),
|
||||
"Mem", nodeUsage.usage[v1.ResourceMemory].Value(),
|
||||
"Pods", nodeUsage.usage[v1.ResourcePods].Value(),
|
||||
}
|
||||
for name := range totalAvailableUsage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
keysAndValues = append(keysAndValues, string(name), totalAvailableUsage[name].Value())
|
||||
}
|
||||
}
|
||||
|
||||
klog.V(3).InfoS("Updated node usage", keysAndValues...)
|
||||
// check if pods can be still evicted
|
||||
if !continueEviction(nodeInfo, totalAvailableUsage) {
|
||||
if !continueEviction(nodeUsage, totalAvailableUsage) {
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -355,36 +301,31 @@ func evictPods(
|
||||
}
|
||||
}
|
||||
|
||||
// sortNodesByUsage sorts nodes based on usage according to the given strategy.
|
||||
func sortNodesByUsage(nodes []NodeInfo, ascending bool) {
|
||||
// sortNodesByUsage sorts nodes based on usage in descending order
|
||||
func sortNodesByUsage(nodes []NodeUsage) {
|
||||
sort.Slice(nodes, func(i, j int) bool {
|
||||
ti := nodes[i].usage[v1.ResourceMemory].Value() + nodes[i].usage[v1.ResourceCPU].MilliValue() + nodes[i].usage[v1.ResourcePods].Value()
|
||||
tj := nodes[j].usage[v1.ResourceMemory].Value() + nodes[j].usage[v1.ResourceCPU].MilliValue() + nodes[j].usage[v1.ResourcePods].Value()
|
||||
|
||||
// extended resources
|
||||
for name := range nodes[i].usage {
|
||||
if !nodeutil.IsBasicResource(name) {
|
||||
if !isBasicResource(name) {
|
||||
ti = ti + nodes[i].usage[name].Value()
|
||||
tj = tj + nodes[j].usage[name].Value()
|
||||
}
|
||||
}
|
||||
|
||||
// Return ascending order for HighNodeUtilization strategy
|
||||
if ascending {
|
||||
return ti < tj
|
||||
}
|
||||
|
||||
// Return descending order for LowNodeUtilization strategy
|
||||
// To return sorted in descending order
|
||||
return ti > tj
|
||||
})
|
||||
}
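A small self-contained sketch of the ordering above (the single-argument form shown alongside it sorts descending only): each node's memory bytes, CPU milli-cores and pod count are summed into one score and the nodes are ordered by it. The usage numbers below are taken from the testNode fixtures further down.

```
// Illustrative sketch; not repository code.
package main

import (
	"fmt"
	"sort"
)

type nodeScore struct {
	name  string
	score int64 // memory bytes + CPU milli-cores + pod count (+ any extended resources)
}

// sortByScore mirrors the comparator: ascending for HighNodeUtilization, descending for LowNodeUtilization.
func sortByScore(nodes []nodeScore, ascending bool) {
	sort.Slice(nodes, func(i, j int) bool {
		if ascending {
			return nodes[i].score < nodes[j].score
		}
		return nodes[i].score > nodes[j].score
	})
}

func main() {
	nodes := []nodeScore{
		{"node1", 3038982964 + 1730 + 25},
		{"node2", 3038982964 + 1220 + 11},
		{"node3", 5038982964 + 1530 + 20},
	}
	sortByScore(nodes, false)
	fmt.Println(nodes[0].name, nodes[1].name, nodes[2].name) // node3 node1 node2
}
```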
// isNodeAboveTargetUtilization checks if a node is overutilized
|
||||
// At least one resource has to be above the high threshold
|
||||
func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
|
||||
func isNodeAboveTargetUtilization(usage NodeUsage) bool {
|
||||
for name, nodeValue := range usage.usage {
|
||||
// usage.highResourceThreshold[name] < nodeValue
|
||||
if threshold[name].Cmp(*nodeValue) == -1 {
|
||||
if usage.highResourceThreshold[name].Cmp(*nodeValue) == -1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -393,10 +334,10 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName
|
||||
|
||||
// isNodeWithLowUtilization checks if a node is underutilized
|
||||
// All resources have to be below the low threshold
|
||||
func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
|
||||
func isNodeWithLowUtilization(usage NodeUsage) bool {
|
||||
for name, nodeValue := range usage.usage {
|
||||
// usage.lowResourceThreshold[name] < nodeValue
|
||||
if threshold[name].Cmp(*nodeValue) == -1 {
|
||||
if usage.lowResourceThreshold[name].Cmp(*nodeValue) == -1 {
|
||||
return false
|
||||
}
|
||||
}
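The two predicates differ in quantifier: a node is overutilized as soon as any single resource exceeds its high threshold, but underutilized only when every resource sits at or below its low threshold. A minimal sketch of the underlying Quantity comparison (helper name assumed):

```
// Standalone illustration of the comparison used by both predicates.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// anyAboveThreshold reports whether any resource's usage exceeds its threshold.
func anyAboveThreshold(usage, threshold map[v1.ResourceName]*resource.Quantity) bool {
	for name, used := range usage {
		if threshold[name].Cmp(*used) == -1 { // threshold < usage
			return true
		}
	}
	return false
}

func main() {
	usage := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:  resource.NewMilliQuantity(1730, resource.DecimalSI),
		v1.ResourcePods: resource.NewQuantity(25, resource.DecimalSI),
	}
	high := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:  resource.NewMilliQuantity(1000, resource.DecimalSI),
		v1.ResourcePods: resource.NewQuantity(30, resource.DecimalSI),
	}
	// CPU alone is above its threshold, which is enough to classify the node as overutilized.
	fmt.Println(anyAboveThreshold(usage, high)) // true
}
```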
@@ -413,6 +354,43 @@ func getResourceNames(thresholds api.ResourceThresholds) []v1.ResourceName {
|
||||
return resourceNames
|
||||
}
|
||||
|
||||
// isBasicResource checks if resource is basic native.
|
||||
func isBasicResource(name v1.ResourceName) bool {
|
||||
switch name {
|
||||
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func nodeUtilization(node *v1.Node, pods []*v1.Pod, resourceNames []v1.ResourceName) map[v1.ResourceName]*resource.Quantity {
|
||||
totalReqs := map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(0, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
|
||||
}
|
||||
for _, name := range resourceNames {
|
||||
if !isBasicResource(name) {
|
||||
totalReqs[name] = resource.NewQuantity(0, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
|
||||
for _, pod := range pods {
|
||||
req, _ := utils.PodRequestsAndLimits(pod)
|
||||
for _, name := range resourceNames {
|
||||
quantity, ok := req[name]
|
||||
if ok && name != v1.ResourcePods {
|
||||
// As Quantity.Add says: Add adds the provided y quantity to the current value. If the current value is zero,
|
||||
// the format of the quantity will be updated to the format of y.
|
||||
totalReqs[name].Add(quantity)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return totalReqs
|
||||
}
|
||||
|
||||
func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*v1.Pod) {
|
||||
var nonRemovablePods, removablePods []*v1.Pod
|
||||
|
||||
@@ -426,34 +404,3 @@ func classifyPods(pods []*v1.Pod, filter func(pod *v1.Pod) bool) ([]*v1.Pod, []*
|
||||
|
||||
return nonRemovablePods, removablePods
|
||||
}
|
||||
|
||||
func averageNodeBasicresources(nodes []*v1.Node, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, resourceNames []v1.ResourceName) api.ResourceThresholds {
|
||||
total := api.ResourceThresholds{}
|
||||
average := api.ResourceThresholds{}
|
||||
numberOfNodes := len(nodes)
|
||||
for _, node := range nodes {
|
||||
pods, err := podutil.ListPodsOnANode(node.Name, getPodsAssignedToNode, nil)
|
||||
if err != nil {
|
||||
numberOfNodes--
|
||||
continue
|
||||
}
|
||||
usage := nodeutil.NodeUtilization(pods, resourceNames)
|
||||
nodeCapacity := node.Status.Capacity
|
||||
if len(node.Status.Allocatable) > 0 {
|
||||
nodeCapacity = node.Status.Allocatable
|
||||
}
|
||||
for resource, value := range usage {
|
||||
nodeCapacityValue := nodeCapacity[resource]
|
||||
if resource == v1.ResourceCPU {
|
||||
total[resource] += api.Percentage(value.MilliValue()) / api.Percentage(nodeCapacityValue.MilliValue()) * 100.0
|
||||
} else {
|
||||
total[resource] += api.Percentage(value.Value()) / api.Percentage(nodeCapacityValue.Value()) * 100.0
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
for resource, value := range total {
|
||||
average[resource] = value / api.Percentage(numberOfNodes)
|
||||
}
|
||||
return average
|
||||
}
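As a worked example of the averaging above (a sketch using the CPU figures from the testNode fixtures earlier in this diff, 1730m/1220m/1530m used against 1930m allocatable): each node contributes usage divided by allocatable as a percentage, and the per-resource totals are divided by the number of nodes whose pods could be listed.

```
// Illustrative arithmetic only; not repository code.
package main

import "fmt"

func main() {
	perNodeCPUPercent := []float64{
		1730.0 / 1930.0 * 100,
		1220.0 / 1930.0 * 100,
		1530.0 / 1930.0 * 100,
	}

	total := 0.0
	for _, p := range perNodeCPUPercent {
		total += p
	}
	// Nodes whose pods failed to list would be dropped from the denominator.
	fmt.Printf("average CPU usage: %.1f%%\n", total/float64(len(perNodeCPUPercent)))
}
```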
@@ -18,91 +18,17 @@ package nodeutilization
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"math"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
lowPriority = int32(0)
|
||||
highPriority = int32(10000)
|
||||
extendedResource = v1.ResourceName("example.com/foo")
|
||||
testNode1 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node1"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1730, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(25, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
testNode2 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node2"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1220, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(11, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
testNode3 = NodeInfo{
|
||||
NodeUsage: NodeUsage{
|
||||
node: &v1.Node{
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3977868*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1930, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(3287692*1024, resource.BinarySI),
|
||||
v1.ResourcePods: *resource.NewQuantity(29, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node3"},
|
||||
},
|
||||
usage: map[v1.ResourceName]*resource.Quantity{
|
||||
v1.ResourceCPU: resource.NewMilliQuantity(1530, resource.DecimalSI),
|
||||
v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
|
||||
v1.ResourcePods: resource.NewQuantity(20, resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func TestValidateThresholds(t *testing.T) {
|
||||
@@ -230,27 +156,3 @@ func TestResourceUsagePercentages(t *testing.T) {
|
||||
|
||||
t.Logf("resourceUsagePercentage: %#v\n", resourceUsagePercentage)
|
||||
}
|
||||
|
||||
func TestSortNodesByUsageDescendingOrder(t *testing.T) {
|
||||
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
|
||||
expectedNodeList := []NodeInfo{testNode3, testNode1, testNode2} // testNode3 has the highest usage
|
||||
sortNodesByUsage(nodeList, false) // ascending=false, sort nodes in descending order
|
||||
|
||||
for i := 0; i < len(expectedNodeList); i++ {
|
||||
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
|
||||
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSortNodesByUsageAscendingOrder(t *testing.T) {
|
||||
nodeList := []NodeInfo{testNode1, testNode2, testNode3}
|
||||
expectedNodeList := []NodeInfo{testNode2, testNode1, testNode3}
|
||||
sortNodesByUsage(nodeList, true) // ascending=true, sort nodes in ascending order
|
||||
|
||||
for i := 0; i < len(expectedNodeList); i++ {
|
||||
if nodeList[i].NodeUsage.node.Name != expectedNodeList[i].NodeUsage.node.Name {
|
||||
t.Errorf("Expected %v, got %v", expectedNodeList[i].NodeUsage.node.Name, nodeList[i].NodeUsage.node.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,7 +47,6 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("n4", 2, 2, 1, nil)
|
||||
|
||||
p1 := test.BuildTestPod("p1", 100, 0, node1.Name, nil)
|
||||
p2 := test.BuildTestPod("p2", 100, 0, node1.Name, nil)
|
||||
@@ -175,14 +174,6 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
nodes: []*v1.Node{node1},
|
||||
expectedEvictedPodCount: 0,
|
||||
},
|
||||
{
|
||||
description: "Won't evict pods because only other node doesn't have enough resources",
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
pods: []*v1.Pod{p1, p2, p3, p4},
|
||||
nodes: []*v1.Node{node1, node4},
|
||||
expectedEvictedPodCount: 0,
|
||||
nodeFit: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -218,7 +209,6 @@ func TestPodAntiAffinity(t *testing.T) {
|
||||
test.maxPodsToEvictPerNode,
|
||||
test.maxNoOfPodsToEvictPerNamespace,
|
||||
test.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -298,7 +298,6 @@ func TestPodLifeTime(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
tc.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
tc.ignorePvcPods,
|
||||
|
||||
@@ -97,10 +97,8 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
Unschedulable: true,
|
||||
}
|
||||
})
|
||||
node4 := test.BuildTestNode("node4", 200, 3000, 10, nil)
|
||||
node5 := test.BuildTestNode("node5", 2000, 3000, 10, nil)
|
||||
|
||||
pods := append(append(initPods(node1), test.BuildTestPod("CPU-consumer-1", 150, 100, node4.Name, nil)), test.BuildTestPod("CPU-consumer-2", 150, 100, node5.Name, nil))
|
||||
pods := initPods(node1)
|
||||
|
||||
createStrategy := func(enabled, includingInitContainers bool, restartThresholds int32, nodeFit bool) api.DeschedulerStrategy {
|
||||
return api.DeschedulerStrategy{
|
||||
@@ -201,20 +199,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node does not have enough CPU, 0 pod evictions",
|
||||
strategy: createStrategy(true, true, 1, true),
|
||||
nodes: []*v1.Node{node1, node4},
|
||||
expectedEvictedPodCount: 0,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
},
|
||||
{
|
||||
description: "All pods have total restarts equals threshold(maxPodsToEvictPerNode=3) but the only other node has enough CPU, 3 pod evictions",
|
||||
strategy: createStrategy(true, true, 1, true),
|
||||
nodes: []*v1.Node{node1, node5},
|
||||
expectedEvictedPodCount: 3,
|
||||
maxPodsToEvictPerNode: &uint3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
@@ -250,7 +234,6 @@ func TestRemovePodsHavingTooManyRestarts(t *testing.T) {
|
||||
tc.maxPodsToEvictPerNode,
|
||||
tc.maxNoOfPodsToEvictPerNamespace,
|
||||
tc.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -18,18 +18,20 @@ package strategies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
|
||||
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/validation"
|
||||
"sigs.k8s.io/descheduler/pkg/utils"
|
||||
@@ -168,7 +170,7 @@ func RemovePodsViolatingTopologySpreadConstraint(
|
||||
klog.V(2).InfoS("Skipping topology constraint because it is already balanced", "constraint", constraint)
|
||||
continue
|
||||
}
|
||||
balanceDomains(client, getPodsAssignedToNode, podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodes)
|
||||
balanceDomains(podsForEviction, constraint, constraintTopologies, sumPods, evictable.IsEvictable, nodeMap)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,14 +225,12 @@ func topologyIsBalanced(topology map[topologyPair][]*v1.Pod, constraint v1.Topol
|
||||
// [5, 5, 5, 5, 5, 5]
|
||||
// (assuming even distribution by the scheduler of the evicted pods)
|
||||
func balanceDomains(
|
||||
client clientset.Interface,
|
||||
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
|
||||
podsForEviction map[*v1.Pod]struct{},
|
||||
constraint v1.TopologySpreadConstraint,
|
||||
constraintTopologies map[topologyPair][]*v1.Pod,
|
||||
sumPods float64,
|
||||
isEvictable func(pod *v1.Pod) bool,
|
||||
nodes []*v1.Node) {
|
||||
isEvictable func(*v1.Pod) bool,
|
||||
nodeMap map[string]*v1.Node) {
|
||||
|
||||
idealAvg := sumPods / float64(len(constraintTopologies))
|
||||
sortedDomains := sortDomains(constraintTopologies, isEvictable)
|
||||
@@ -273,19 +273,8 @@ func balanceDomains(
|
||||
// also (just for tracking), add them to the list of pods in the lower topology
|
||||
aboveToEvict := sortedDomains[j].pods[len(sortedDomains[j].pods)-movePods:]
|
||||
for k := range aboveToEvict {
|
||||
// PodFitsAnyOtherNode excludes the current node because, for the sake of domain balancing only, we care about if there is any other
|
||||
// place it could theoretically fit.
|
||||
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
|
||||
// Also, if the pod has a hard nodeAffinity/nodeSelector/toleration that only matches this node,
|
||||
// don't bother evicting it as it will just end up back on the same node
|
||||
// however we still account for it "being evicted" so the algorithm can complete
|
||||
// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
|
||||
// but still try to get the required # of movePods (instead of just chopping that value off the slice above).
|
||||
// In other words, PTS can perform suboptimally if some of its chosen pods don't fit on other nodes.
|
||||
// This is because the chosen pods aren't sorted, but immovable pods still count as "evicted" toward the PTS algorithm.
|
||||
// So, a better selection heuristic could improve performance.
|
||||
if !node.PodFitsAnyOtherNode(getPodsAssignedToNode, aboveToEvict[k], nodes) {
|
||||
klog.V(2).InfoS("ignoring pod for eviction as it does not fit on any other node", "pod", klog.KObj(aboveToEvict[k]))
|
||||
if err := validatePodFitsOnOtherNodes(aboveToEvict[k], nodeMap); err != nil {
|
||||
klog.V(2).InfoS(fmt.Sprintf("ignoring pod for eviction due to: %s", err.Error()), "pod", klog.KObj(aboveToEvict[k]))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -296,6 +285,56 @@ func balanceDomains(
|
||||
}
|
||||
}
|
||||
|
||||
// validatePodFitsOnOtherNodes performs validation based on scheduling predicates for affinity and toleration.
|
||||
// It excludes the current node because, for the sake of domain balancing only, we care about if there is any other
|
||||
// place it could theoretically fit.
|
||||
// If the pod doesn't fit on its current node, that is a job for RemovePodsViolatingNodeAffinity, and irrelevant to Topology Spreading
|
||||
func validatePodFitsOnOtherNodes(pod *v1.Pod, nodeMap map[string]*v1.Node) error {
|
||||
// if the pod has a hard nodeAffinity/nodeSelector/toleration that only matches this node,
|
||||
// don't bother evicting it as it will just end up back on the same node
|
||||
// however we still account for it "being evicted" so the algorithm can complete
|
||||
// TODO(@damemi): Since we don't order pods wrt their affinities, we should refactor this to skip the current pod
|
||||
// but still try to get the required # of movePods (instead of just chopping that value off the slice above)
|
||||
isRequiredDuringSchedulingIgnoredDuringExecution := pod.Spec.Affinity != nil &&
|
||||
pod.Spec.Affinity.NodeAffinity != nil &&
|
||||
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil
|
||||
|
||||
hardTaintsFilter := func(taint *v1.Taint) bool {
|
||||
return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
|
||||
}
|
||||
|
||||
var eligibleNodesCount, ineligibleAffinityNodesCount, ineligibleTaintedNodesCount int
|
||||
for _, node := range nodeMap {
|
||||
if node == nodeMap[pod.Spec.NodeName] {
|
||||
continue
|
||||
}
|
||||
if pod.Spec.NodeSelector != nil || isRequiredDuringSchedulingIgnoredDuringExecution {
|
||||
if !nodeutil.PodFitsCurrentNode(pod, node) {
|
||||
ineligibleAffinityNodesCount++
|
||||
continue
|
||||
}
|
||||
}
|
||||
if !utils.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, node.Spec.Taints, hardTaintsFilter) {
|
||||
ineligibleTaintedNodesCount++
|
||||
continue
|
||||
}
|
||||
|
||||
eligibleNodesCount++
|
||||
}
|
||||
|
||||
if eligibleNodesCount == 0 {
|
||||
var errs []error
|
||||
if ineligibleAffinityNodesCount > 0 {
|
||||
errs = append(errs, fmt.Errorf("%d nodes with ineligible selector/affinity", ineligibleAffinityNodesCount))
|
||||
}
|
||||
if ineligibleTaintedNodesCount > 0 {
|
||||
errs = append(errs, fmt.Errorf("%d nodes with taints that are not tolerated", ineligibleTaintedNodesCount))
|
||||
}
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
return nil
|
||||
}
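A brief standalone illustration of the hard-taint filter used in validatePodFitsOnOtherNodes: only NoSchedule and NoExecute taints disqualify a candidate node, while PreferNoSchedule is soft and ignored, which is consistent with the "PreferNoSchedule Taint" test case later in this diff.

```
// Sketch only; the filter mirrors hardTaintsFilter above.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isHardTaint returns true for taints that would prevent the pod from being scheduled at all.
func isHardTaint(taint *v1.Taint) bool {
	return taint.Effect == v1.TaintEffectNoSchedule || taint.Effect == v1.TaintEffectNoExecute
}

func main() {
	taints := []v1.Taint{
		{Key: "taint-test", Value: "test", Effect: v1.TaintEffectPreferNoSchedule},
		{Key: "taint-test", Value: "test", Effect: v1.TaintEffectNoSchedule},
		{Key: "taint-test", Value: "test", Effect: v1.TaintEffectNoExecute},
	}
	for i := range taints {
		fmt.Printf("%s disqualifies a node: %v\n", taints[i].Effect, isHardTaint(&taints[i]))
	}
}
```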
// sortDomains sorts and splits the list of topology domains based on their size
|
||||
// it also sorts the list of pods within the domains based on their node affinity/selector and priority in the following order:
|
||||
// 1. non-evictable pods
|
||||
@@ -303,7 +342,7 @@ func balanceDomains(
|
||||
// 3. pods in descending priority
|
||||
// 4. all other pods
|
||||
// We then pop pods off the back of the list for eviction
|
||||
func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable func(pod *v1.Pod) bool) []topology {
|
||||
func sortDomains(constraintTopologyPairs map[topologyPair][]*v1.Pod, isEvictable func(*v1.Pod) bool) []topology {
|
||||
sortedTopologies := make([]topology, 0, len(constraintTopologyPairs))
|
||||
// sort the topologies and return 2 lists: those <= the average and those > the average (> list inverted)
|
||||
for pair, list := range constraintTopologyPairs {
|
||||
|
||||
@@ -483,38 +483,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains size [2 6], maxSkew=2, can't move any because node1 does not have enough CPU",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 200, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneB" }),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(2),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
{
|
||||
count: 6,
|
||||
node: "n2",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
// see https://github.com/kubernetes-sigs/descheduler/issues/564
|
||||
name: "Multiple constraints (6 nodes/2 zones, 4 pods)",
|
||||
@@ -718,7 +686,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pods since pod does not tolerate the tainted node",
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
@@ -750,43 +718,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
strategy: api.DeschedulerStrategy{},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 0 pods since pod does not tolerate the tainted node, and NodeFit is enabled",
|
||||
nodes: []*v1.Node{
|
||||
test.BuildTestNode("n1", 2000, 3000, 10, func(n *v1.Node) { n.Labels["zone"] = "zoneA" }),
|
||||
test.BuildTestNode("n2", 2000, 3000, 10, func(n *v1.Node) {
|
||||
n.Labels["zone"] = "zoneB"
|
||||
n.Spec.Taints = []v1.Taint{
|
||||
{
|
||||
Key: "taint-test",
|
||||
Value: "test",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}
|
||||
}),
|
||||
},
|
||||
pods: createTestPods([]testPodList{
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
constraints: getDefaultTopologyConstraints(1),
|
||||
},
|
||||
{
|
||||
count: 1,
|
||||
node: "n1",
|
||||
labels: map[string]string{"foo": "bar"},
|
||||
nodeSelector: map[string]string{"zone": "zoneA"},
|
||||
},
|
||||
}),
|
||||
expectedEvictedCount: 0,
|
||||
strategy: api.DeschedulerStrategy{
|
||||
Params: &api.StrategyParameters{
|
||||
NodeFit: true,
|
||||
},
|
||||
},
|
||||
namespaces: []string{"ns1"},
|
||||
},
|
||||
{
|
||||
name: "2 domains, sizes [2,0], maxSkew=1, move 1 pod for node with PreferNoSchedule Taint",
|
||||
nodes: []*v1.Node{
|
||||
@@ -971,7 +902,6 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
tc.nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -6,9 +6,25 @@ import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
// owner: @jinxu
|
||||
// beta: v1.10
|
||||
//
|
||||
// New local storage types to support local storage capacity isolation
|
||||
LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
|
||||
|
||||
// owner: @egernst
|
||||
// alpha: v1.16
|
||||
//
|
||||
// Enables PodOverhead, for accounting pod overheads which are specific to a given RuntimeClass
|
||||
PodOverhead featuregate.Feature = "PodOverhead"
|
||||
)
|
||||
|
||||
// GetResourceRequest finds and returns the request value for a specific resource.
|
||||
func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
|
||||
if resource == v1.ResourcePods {
|
||||
@@ -37,6 +53,11 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
|
||||
requestQuantity = resource.Quantity{Format: resource.DecimalSI}
|
||||
}
|
||||
|
||||
if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(LocalStorageCapacityIsolation) {
|
||||
// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
|
||||
return requestQuantity
|
||||
}
|
||||
|
||||
for _, container := range pod.Spec.Containers {
|
||||
if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
|
||||
requestQuantity.Add(rQuantity)
|
||||
@@ -51,9 +72,9 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
|
||||
}
|
||||
}
|
||||
|
||||
// We assume pod overhead feature gate is enabled.
|
||||
// We can't import the scheduler settings so we will inherit the default.
|
||||
if pod.Spec.Overhead != nil {
|
||||
// if PodOverhead feature is supported, add overhead for running a pod
|
||||
// to the total requests if the resource total is non-zero
|
||||
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) {
|
||||
if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
|
||||
requestQuantity.Add(podOverhead)
|
||||
}
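To make the feature-gate check above concrete, here is a rough standalone sketch (not the repository's helper, and limited to CPU) of what gating on PodOverhead means: when the gate is on and the pod's summed container requests are non-zero, the RuntimeClass overhead is added on top.

```
// Illustrative sketch; helper name is assumed.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func cpuRequestWithOverhead(pod *v1.Pod, overheadGateEnabled bool) resource.Quantity {
	total := resource.Quantity{Format: resource.DecimalSI}
	for _, c := range pod.Spec.Containers {
		if q, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
			total.Add(q)
		}
	}
	// Overhead counts only when the gate is enabled and the pod requests something at all.
	if overheadGateEnabled && pod.Spec.Overhead != nil {
		if q, ok := pod.Spec.Overhead[v1.ResourceCPU]; ok && !total.IsZero() {
			total.Add(q)
		}
	}
	return total
}

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				},
			}},
			Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")},
		},
	}
	withOverhead := cpuRequestWithOverhead(pod, true)
	withoutOverhead := cpuRequestWithOverhead(pod, false)
	fmt.Println(withOverhead.String())    // 350m
	fmt.Println(withoutOverhead.String()) // 100m
}
```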
@@ -141,9 +162,9 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
|
||||
maxResourceList(limits, container.Resources.Limits)
|
||||
}
|
||||
|
||||
// We assume pod overhead feature gate is enabled.
|
||||
// We can't import the scheduler settings so we will inherit the default.
|
||||
if pod.Spec.Overhead != nil {
|
||||
// if PodOverhead feature is supported, add overhead for running a pod
|
||||
// to the sum of requests and to non-zero limits:
|
||||
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(PodOverhead) {
|
||||
addResourceList(reqs, pod.Spec.Overhead)
|
||||
|
||||
for name, quantity := range pod.Spec.Overhead {
|
||||
@@ -186,31 +207,12 @@ func maxResourceList(list, new v1.ResourceList) {
|
||||
|
||||
// PodToleratesTaints returns true if a pod tolerates one node's taints
|
||||
func PodToleratesTaints(pod *v1.Pod, taintsOfNodes map[string][]v1.Taint) bool {
|
||||
|
||||
for nodeName, taintsForNode := range taintsOfNodes {
|
||||
if len(pod.Spec.Tolerations) >= len(taintsForNode) {
|
||||
|
||||
if TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
|
||||
return true
|
||||
}
|
||||
|
||||
if klog.V(5).Enabled() {
|
||||
for i := range taintsForNode {
|
||||
if !TolerationsTolerateTaint(pod.Spec.Tolerations, &taintsForNode[i]) {
|
||||
klog.V(5).InfoS("Pod doesn't tolerate node taint",
|
||||
"pod", klog.KObj(pod),
|
||||
"nodeName", nodeName,
|
||||
"taint", fmt.Sprintf("%s:%s=%s", taintsForNode[i].Key, taintsForNode[i].Value, taintsForNode[i].Effect),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
klog.V(5).InfoS("Pod doesn't tolerate nodes taint, count mismatch",
|
||||
"pod", klog.KObj(pod),
|
||||
"nodeName", nodeName,
|
||||
)
|
||||
if len(pod.Spec.Tolerations) >= len(taintsForNode) && TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taintsForNode, nil) {
|
||||
return true
|
||||
}
|
||||
klog.V(5).InfoS("Pod doesn't tolerate nodes taint", "pod", klog.KObj(pod), "nodeName", nodeName)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -20,12 +20,16 @@ import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
|
||||
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
|
||||
@@ -105,8 +109,7 @@ func TestRemoveDuplicates(t *testing.T) {
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -144,7 +147,6 @@ func TestRemoveDuplicates(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
nodes,
|
||||
getPodsAssignedToNode,
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
@@ -175,3 +177,27 @@ func TestRemoveDuplicates(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
|
||||
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(labelMap).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(podList.Items) != desireRunningPodNum {
|
||||
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
|
||||
return false, nil
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase != v1.PodRunning {
|
||||
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for pods running: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,7 +83,7 @@ func TestFailedPods(t *testing.T) {
|
||||
defer jobClient.Delete(ctx, job.Name, metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy})
|
||||
waitForJobPodPhase(ctx, t, clientSet, job, v1.PodFailed)
|
||||
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
|
||||
|
||||
t.Logf("Running RemoveFailedPods strategy for %s", name)
|
||||
strategies.RemoveFailedPods(
|
||||
|
||||
@@ -1,201 +0,0 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
)
|
||||
|
||||
func TestLeaderElection(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
clientSet, _, _, stopCh := initializeClient(t)
|
||||
defer close(stopCh)
|
||||
|
||||
ns1 := "e2e-" + strings.ToLower(t.Name()+"-a")
|
||||
ns2 := "e2e-" + strings.ToLower(t.Name()+"-b")
|
||||
|
||||
t.Logf("Creating testing namespace %v", ns1)
|
||||
testNamespace1 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns1}}
|
||||
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace1, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Unable to create ns %v", testNamespace1.Name)
|
||||
}
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace1.Name, metav1.DeleteOptions{})
|
||||
|
||||
t.Logf("Creating testing namespace %v", ns2)
|
||||
testNamespace2 := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns2}}
|
||||
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace2, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Unable to create ns %v", testNamespace2.Name)
|
||||
}
|
||||
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace2.Name, metav1.DeleteOptions{})
|
||||
|
||||
deployment1, err := createDeployment(ctx, clientSet, ns1, 5, t)
|
||||
if err != nil {
|
||||
t.Fatalf("create deployment 1: %v", err)
|
||||
}
|
||||
defer clientSet.AppsV1().Deployments(deployment1.Namespace).Delete(ctx, deployment1.Name, metav1.DeleteOptions{})
|
||||
|
||||
deployment2, err := createDeployment(ctx, clientSet, ns2, 5, t)
|
||||
if err != nil {
|
||||
t.Fatalf("create deployment 2: %v", err)
|
||||
}
|
||||
defer clientSet.AppsV1().Deployments(deployment2.Namespace).Delete(ctx, deployment2.Name, metav1.DeleteOptions{})
|
||||
|
||||
waitForPodsRunning(ctx, t, clientSet, map[string]string{"test": "leaderelection", "name": "test-leaderelection"}, 5, ns1)
|
||||
|
||||
podListAOrg := getPodNameList(ctx, clientSet, ns1, t)
|
||||
|
||||
waitForPodsRunning(ctx, t, clientSet, map[string]string{"test": "leaderelection", "name": "test-leaderelection"}, 5, ns2)
|
||||
|
||||
podListBOrg := getPodNameList(ctx, clientSet, ns2, t)
|
||||
|
||||
s1, err := options.NewDeschedulerServer()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to initialize server: %v", err)
|
||||
}
|
||||
s1.Client = clientSet
|
||||
s1.DeschedulingInterval = 5 * time.Second
|
||||
s1.LeaderElection.LeaderElect = true
|
||||
s1.KubeconfigFile = os.Getenv("KUBECONFIG")
|
||||
s1.PolicyConfigFile = "./policy_leaderelection_a.yaml"
|
||||
|
||||
s2, err := options.NewDeschedulerServer()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to initialize server: %v", err)
|
||||
}
|
||||
s2.Client = clientSet
|
||||
s2.DeschedulingInterval = 5 * time.Second
|
||||
s2.LeaderElection.LeaderElect = true
|
||||
s2.KubeconfigFile = os.Getenv("KUBECONFIG")
|
||||
s2.PolicyConfigFile = "./policy_leaderelection_b.yaml"
|
||||
|
||||
t.Log("starting deschedulers")
|
||||
|
||||
go func() {
|
||||
err := descheduler.Run(ctx, s1)
|
||||
if err != nil {
|
||||
t.Errorf("unable to start descheduler: %v", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
go func() {
|
||||
err := descheduler.Run(ctx, s2)
|
||||
if err != nil {
|
||||
t.Errorf("unable to start descheduler: %v", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
defer clientSet.CoordinationV1().Leases(s1.LeaderElection.ResourceNamespace).Delete(ctx, s1.LeaderElection.ResourceName, metav1.DeleteOptions{})
|
||||
defer clientSet.CoordinationV1().Leases(s2.LeaderElection.ResourceNamespace).Delete(ctx, s2.LeaderElection.ResourceName, metav1.DeleteOptions{})
|
||||
|
||||
// wait for a while so all the pods are 5 seconds older
|
||||
time.Sleep(7 * time.Second)
|
||||
|
||||
// validate only pods from e2e-testleaderelection-a namespace are evicted.
|
||||
podListA := getPodNameList(ctx, clientSet, ns1, t)
|
||||
|
||||
podListB := getPodNameList(ctx, clientSet, ns2, t)
|
||||
|
||||
left := reflect.DeepEqual(podListAOrg, podListA)
|
||||
right := reflect.DeepEqual(podListBOrg, podListB)
|
||||
|
||||
singleNamespaceEvicted := (left && !right) || (!left && right)
|
||||
|
||||
if singleNamespaceEvicted {
|
||||
if !left {
|
||||
t.Logf("Only the pods in %s namespace are evicted. Pods before: %s, Pods after %s", ns1, podListAOrg, podListA)
|
||||
} else {
|
||||
t.Logf("Only the pods in %s namespace are evicted. Pods before: %s, Pods after %s", ns2, podListBOrg, podListB)
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("Pods are evicted in both namespaces. For %s namespace Pods before: %s, Pods after %s. And, for %s namespace Pods before: %s, Pods after: %s", ns1, podListAOrg, podListA, ns2, podListBOrg, podListB)
|
||||
}
|
||||
}
|
||||
|
||||
func createDeployment(ctx context.Context, clientSet clientset.Interface, namespace string, replicas int32, t *testing.T) (*appsv1.Deployment, error) {
|
||||
deployment := &appsv1.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "leaderelection",
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
|
||||
},
|
||||
Spec: appsv1.DeploymentSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"test": "leaderelection", "name": "test-leaderelection"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
ImagePullPolicy: "Always",
|
||||
Image: "kubernetes/pause",
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
t.Logf("Creating deployment %v for namespace %s", deployment.Name, deployment.Namespace)
|
||||
deployment, err := clientSet.AppsV1().Deployments(deployment.Namespace).Create(ctx, deployment, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Logf("Error creating deployment: %v", err)
|
||||
if err = clientSet.AppsV1().Deployments(deployment.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "leaderelection", "name": "test-leaderelection"})).String(),
|
||||
}); err != nil {
|
||||
t.Fatalf("Unable to delete deployment: %v", err)
|
||||
}
|
||||
return nil, fmt.Errorf("create deployment %v", err)
|
||||
}
|
||||
return deployment, nil
|
||||
}
|
||||
|
||||
func getPodNameList(ctx context.Context, clientSet clientset.Interface, namespace string, t *testing.T) []string {
|
||||
podList, err := clientSet.CoreV1().Pods(namespace).List(
|
||||
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "leaderelection", "name": "test-leaderelection"})).String()})
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to list pods from ns: %s: %v", namespace, err)
|
||||
}
|
||||
podNames := make([]string, len(podList.Items))
|
||||
for i, pod := range podList.Items {
|
||||
podNames[i] = pod.Name
|
||||
}
|
||||
return podNames
|
||||
}
|
||||
@@ -39,6 +39,7 @@ import (
|
||||
v1qos "k8s.io/kubectl/pkg/util/qos"
|
||||
|
||||
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
|
||||
"sigs.k8s.io/descheduler/pkg/api"
|
||||
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler"
|
||||
"sigs.k8s.io/descheduler/pkg/descheduler/client"
|
||||
@@ -198,7 +199,6 @@ func runPodLifetimeStrategy(
|
||||
nil,
|
||||
nil,
|
||||
nodes,
|
||||
getPodsAssignedToNode,
|
||||
false,
|
||||
evictCritical,
|
||||
false,
|
||||
@@ -324,7 +324,7 @@ func TestLowNodeUtilization(t *testing.T) {
|
||||
waitForRCPodsRunning(ctx, t, clientSet, rc)
|
||||
|
||||
// Run LowNodeUtilization strategy
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
|
||||
|
||||
podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
|
||||
if err != nil {
|
||||
@@ -517,8 +517,8 @@ func TestEvictSystemCriticalPriorityClass(t *testing.T) {
|
||||
}
|
||||
|
||||
func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
|
||||
highPriority := int32(1000)
|
||||
lowPriority := int32(500)
|
||||
var highPriority = int32(1000)
|
||||
var lowPriority = int32(500)
|
||||
ctx := context.Background()
|
||||
|
||||
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
|
||||
@@ -647,8 +647,8 @@ func TestThresholdPriorityClass(t *testing.T) {
|
||||
}
|
||||
|
||||
func testPriority(t *testing.T, isPriorityClass bool) {
|
||||
highPriority := int32(1000)
|
||||
lowPriority := int32(500)
|
||||
var highPriority = int32(1000)
|
||||
var lowPriority = int32(500)
|
||||
ctx := context.Background()
|
||||
|
||||
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
|
||||
@@ -745,7 +745,7 @@ func testPriority(t *testing.T, isPriorityClass bool) {
|
||||
t.Fatalf("None of %v high priority pods are expected to be deleted", expectReservePodNames)
|
||||
}
|
||||
|
||||
// check if all pods with low priority class are evicted
|
||||
//check if all pods with low priority class are evicted
|
||||
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
|
||||
podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
|
||||
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
|
||||
@@ -848,7 +848,7 @@ func TestPodLabelSelector(t *testing.T) {
|
||||
t.Fatalf("None of %v unevictable pods are expected to be deleted", expectReservePodNames)
|
||||
}
|
||||
|
||||
// check if all selected pods are evicted
|
||||
//check if all selected pods are evicted
|
||||
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
|
||||
podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List(
|
||||
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()})
|
||||
@@ -886,6 +886,17 @@ func TestEvictAnnotation(t *testing.T) {
|
||||
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
|
||||
defer close(stopCh)
|
||||
|
||||
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Error listing node with %v", err)
|
||||
}
|
||||
|
||||
var nodes []*v1.Node
|
||||
for i := range nodeList.Items {
|
||||
node := nodeList.Items[i]
|
||||
nodes = append(nodes, &node)
|
||||
}
|
||||
|
||||
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
|
||||
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("Unable to create ns %v", testNamespace.Name)
|
||||
@@ -900,8 +911,7 @@ func TestEvictAnnotation(t *testing.T) {
|
||||
Name: "sample",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
|
||||
},
|
||||
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI)},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -966,7 +976,7 @@ func TestDeschedulingInterval(t *testing.T) {
|
||||
}
|
||||
s.Client = clientSet
|
||||
|
||||
deschedulerPolicy := &deschedulerapi.DeschedulerPolicy{}
|
||||
deschedulerPolicy := &api.DeschedulerPolicy{}
|
||||
|
||||
c := make(chan bool, 1)
|
||||
go func() {
|
||||
@@ -1133,8 +1143,8 @@ func createBalancedPodForNodes(
|
||||
|
||||
// find the max, if the node has the max,use the one, if not,use the ratio parameter
|
||||
var maxCPUFraction, maxMemFraction float64 = ratio, ratio
|
||||
cpuFractionMap := make(map[string]float64)
|
||||
memFractionMap := make(map[string]float64)
|
||||
var cpuFractionMap = make(map[string]float64)
|
||||
var memFractionMap = make(map[string]float64)
|
||||
|
||||
for _, node := range nodes {
|
||||
cpuFraction, memFraction, _, _ := computeCPUMemFraction(t, cs, node, podRequestedResource)
|
||||
@@ -1171,7 +1181,7 @@ func createBalancedPodForNodes(
|
||||
// add crioMinMemLimit to ensure that all pods are setting at least that much for a limit, while keeping the same ratios
|
||||
needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)+float64(crioMinMemLimit)), resource.BinarySI)
|
||||
|
||||
gracePeriod := int64(1)
|
||||
var gracePeriod = int64(1)
|
||||
// Don't set OwnerReferences to avoid pod eviction
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -1301,44 +1311,20 @@ func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.In
|
||||
}
|
||||
}
|
||||
|
||||
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
|
||||
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
|
||||
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(labelMap).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(podList.Items) != desireRunningPodNum {
|
||||
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
|
||||
return false, nil
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
if pod.Status.Phase != v1.PodRunning {
|
||||
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Error waiting for pods running: %v", err)
|
||||
}
|
||||
}
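For orientation, a minimal usage sketch (not part of the diff) of the polling helper above; the package name, label set, replica count, and namespace are assumed for illustration only.

```go
package e2e // assumed test package name

import (
	"context"
	"testing"

	clientset "k8s.io/client-go/kubernetes"
)

// exampleWaitForPodsRunning shows a hypothetical call site: block until all
// three pods carrying the (assumed) app=nginx label in the e2e-example
// namespace report Running, failing the test if the 60s poll times out.
func exampleWaitForPodsRunning(ctx context.Context, t *testing.T, client clientset.Interface) {
	labelMap := map[string]string{"app": "nginx"} // assumed label selector
	waitForPodsRunning(ctx, t, client, labelMap, 3, "e2e-example")
}
```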
|
||||
|
||||
func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
|
||||
var allNodes []*v1.Node
|
||||
var workerNodes []*v1.Node
|
||||
for i := range nodes {
|
||||
node := nodes[i]
|
||||
allNodes = append(allNodes, &node)
|
||||
if _, exists := node.Labels["node-role.kubernetes.io/control-plane"]; !exists {
|
||||
if _, exists := node.Labels["node-role.kubernetes.io/master"]; !exists {
|
||||
workerNodes = append(workerNodes, &node)
|
||||
}
|
||||
}
|
||||
return allNodes, workerNodes
|
||||
}
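Similarly, a hypothetical sketch of how the node split above is typically consumed; every identifier other than splitNodesAndWorkerNodes itself is assumed for illustration and does not come from the diff.

```go
package e2e // assumed test package name

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// exampleSplitNodes is a hypothetical call site: list every node in the
// cluster and keep only the workers (nodes without the control-plane or
// master role label) as targets for the eviction tests.
func exampleSplitNodes(ctx context.Context, t *testing.T, client clientset.Interface) {
	nodeList, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Error listing nodes: %v", err)
	}
	_, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
	t.Logf("running strategies against %d worker nodes", len(workerNodes))
}
```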
|
||||
|
||||
func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, nodes []*v1.Node) *evictions.PodEvictor {
|
||||
func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, nodes []*v1.Node) *evictions.PodEvictor {
|
||||
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
|
||||
if err != nil || len(evictionPolicyGroupVersion) == 0 {
|
||||
t.Fatalf("Error creating eviction policy group: %v", err)
|
||||
@@ -1350,7 +1336,6 @@ func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAs
|
||||
nil,
|
||||
nil,
|
||||
nodes,
|
||||
getPodsAssignedToNode,
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
|
||||
@@ -137,7 +137,6 @@ func TestTooManyRestarts(t *testing.T) {
|
||||
nil,
|
||||
nil,
|
||||
nodes,
|
||||
getPodsAssignedToNode,
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
@@ -184,25 +183,14 @@ func waitPodRestartCount(ctx context.Context, clientSet clientset.Interface, nam
|
||||
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "restart-pod", "name": "test-toomanyrestarts"})).String(),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err: %v", err)
|
||||
return false, err
|
||||
}
|
||||
if len(podList.Items) < 4 {
|
||||
t.Log("Waiting for 4 pods")
|
||||
return false, nil
|
||||
}
|
||||
for i := 0; i < 4; i++ {
|
||||
if len(podList.Items[0].Status.ContainerStatuses) < 1 {
|
||||
t.Logf("Waiting for podList.Items[%v].Status.ContainerStatuses to be populated", i)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if podList.Items[0].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[1].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[2].Status.ContainerStatuses[0].RestartCount >= 4 && podList.Items[3].Status.ContainerStatuses[0].RestartCount >= 4 {
|
||||
t.Log("Pod restartCount as expected")
|
||||
return true, nil
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err: %v", err)
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,7 +77,7 @@ func TestTopologySpreadConstraint(t *testing.T) {
|
||||
defer deleteRC(ctx, t, clientSet, violatorRc)
|
||||
waitForRCPodsRunning(ctx, t, clientSet, violatorRc)
|
||||
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
|
||||
podEvictor := initPodEvictorOrFail(t, clientSet, nodes)
|
||||
|
||||
// Run TopologySpreadConstraint strategy
|
||||
t.Logf("Running RemovePodsViolatingTopologySpreadConstraint strategy for %s", name)
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 5
      namespaces:
        include:
        - "e2e-testleaderelection-a"
@@ -1,11 +0,0 @@
apiVersion: "descheduler/v1alpha1"
kind: "DeschedulerPolicy"
strategies:
  "PodLifeTime":
    enabled: true
    params:
      podLifeTime:
        maxPodLifeTimeSeconds: 5
      namespaces:
        include:
        - "e2e-testleaderelection-b"
|
||||
@@ -20,9 +20,9 @@ set -o nounset
|
||||
|
||||
# This just runs e2e tests.
|
||||
if [ -n "$KIND_E2E" ]; then
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.24.0}
|
||||
K8S_VERSION=${KUBERNETES_VERSION:-v1.21.1}
|
||||
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl && chmod +x kubectl && mv kubectl /usr/local/bin/
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.13.0/kind-linux-amd64
|
||||
wget https://github.com/kubernetes-sigs/kind/releases/download/v0.11.0/kind-linux-amd64
|
||||
chmod +x kind-linux-amd64
|
||||
mv kind-linux-amd64 kind
|
||||
export PATH=$PATH:$PWD
|
||||
|
||||
21 vendor/github.com/blang/semver/.travis.yml (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
language: go
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.4.3
|
||||
- go: 1.5.4
|
||||
- go: 1.6.3
|
||||
- go: 1.7
|
||||
- go: tip
|
||||
allow_failures:
|
||||
- go: tip
|
||||
install:
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci
|
||||
-repotoken $COVERALLS_TOKEN
|
||||
- echo "Build examples" ; cd examples && go build
|
||||
- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .)
|
||||
env:
|
||||
global:
|
||||
secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw=
|
||||
0 vendor/github.com/blang/semver/v4/LICENSE → vendor/github.com/blang/semver/LICENSE (generated, vendored)
194 vendor/github.com/blang/semver/README.md (generated, vendored, Normal file)
@@ -0,0 +1,194 @@
|
||||
semver for golang [](https://travis-ci.org/blang/semver) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master)
|
||||
======
|
||||
|
||||
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
|
||||
|
||||
Usage
|
||||
-----
|
||||
```bash
|
||||
$ go get github.com/blang/semver
|
||||
```
|
||||
Note: Always vendor your dependencies or fix on a specific version tag.
|
||||
|
||||
```go
|
||||
import github.com/blang/semver
|
||||
v1, err := semver.Make("1.0.0-beta")
|
||||
v2, err := semver.Make("2.0.0-beta")
|
||||
v1.Compare(v2)
|
||||
```
|
||||
|
||||
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
|
||||
|
||||
Why should I use this lib?
|
||||
-----
|
||||
|
||||
- Fully spec compatible
|
||||
- No reflection
|
||||
- No regex
|
||||
- Fully tested (Coverage >99%)
|
||||
- Readable parsing/validation errors
|
||||
- Fast (See [Benchmarks](#benchmarks))
|
||||
- Only Stdlib
|
||||
- Uses values instead of pointers
|
||||
- Many features, see below
|
||||
|
||||
|
||||
Features
|
||||
-----
|
||||
|
||||
- Parsing and validation at all levels
|
||||
- Comparator-like comparisons
|
||||
- Compare Helper Methods
|
||||
- InPlace manipulation
|
||||
- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
|
||||
- Wildcards `>=1.x`, `<=2.5.x`
|
||||
- Sortable (implements sort.Interface)
|
||||
- database/sql compatible (sql.Scanner/Valuer)
|
||||
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
||||
|
||||
Ranges
|
||||
------
|
||||
|
||||
A `Range` is a set of conditions which specify which versions satisfy the range.
|
||||
|
||||
A condition is composed of an operator and a version. The supported operators are:
|
||||
|
||||
- `<1.0.0` Less than `1.0.0`
|
||||
- `<=1.0.0` Less than or equal to `1.0.0`
|
||||
- `>1.0.0` Greater than `1.0.0`
|
||||
- `>=1.0.0` Greater than or equal to `1.0.0`
|
||||
- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
|
||||
- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
|
||||
|
||||
Note that spaces between the operator and the version will be gracefully tolerated.
|
||||
|
||||
A `Range` can link multiple `Ranges` separated by space:
|
||||
|
||||
Ranges can be linked by logical AND:
|
||||
|
||||
- `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
|
||||
- `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
|
||||
|
||||
Ranges can also be linked by logical OR:
|
||||
|
||||
- `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
|
||||
|
||||
AND has a higher precedence than OR. It's not possible to use brackets.
|
||||
|
||||
Ranges can be combined by both AND and OR
|
||||
|
||||
- `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
|
||||
|
||||
Range usage:
|
||||
|
||||
```
v, err := semver.Parse("1.2.3")
expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
if expectedRange(v) {
	// valid
}
```
|
||||
|
||||
Example
|
||||
-----
|
||||
|
||||
Have a look at full examples in [examples/main.go](examples/main.go)
|
||||
|
||||
```go
|
||||
import github.com/blang/semver
|
||||
|
||||
v, err := semver.Make("0.0.1-alpha.preview+123.github")
|
||||
fmt.Printf("Major: %d\n", v.Major)
|
||||
fmt.Printf("Minor: %d\n", v.Minor)
|
||||
fmt.Printf("Patch: %d\n", v.Patch)
|
||||
fmt.Printf("Pre: %s\n", v.Pre)
|
||||
fmt.Printf("Build: %s\n", v.Build)
|
||||
|
||||
// Prerelease versions array
|
||||
if len(v.Pre) > 0 {
|
||||
fmt.Println("Prerelease versions:")
|
||||
for i, pre := range v.Pre {
|
||||
fmt.Printf("%d: %q\n", i, pre)
|
||||
}
|
||||
}
|
||||
|
||||
// Build meta data array
|
||||
if len(v.Build) > 0 {
|
||||
fmt.Println("Build meta data:")
|
||||
for i, build := range v.Build {
|
||||
fmt.Printf("%d: %q\n", i, build)
|
||||
}
|
||||
}
|
||||
|
||||
v001, err := semver.Make("0.0.1")
|
||||
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
|
||||
v001.GT(v) == true
|
||||
v.LT(v001) == true
|
||||
v.GTE(v) == true
|
||||
v.LTE(v) == true
|
||||
|
||||
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
|
||||
v001.Compare(v) == 1
|
||||
v.Compare(v001) == -1
|
||||
v.Compare(v) == 0
|
||||
|
||||
// Manipulate Version in place:
|
||||
v.Pre[0], err = semver.NewPRVersion("beta")
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing pre release version: %q", err)
|
||||
}
|
||||
|
||||
fmt.Println("\nValidate versions:")
|
||||
v.Build[0] = "?"
|
||||
|
||||
err = v.Validate()
|
||||
if err != nil {
|
||||
fmt.Printf("Validation failed: %s\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
Benchmarks
|
||||
-----
|
||||
|
||||
BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op
|
||||
BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op
|
||||
BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op
|
||||
BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op
|
||||
BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op
|
||||
BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op
|
||||
BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op
|
||||
BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op
|
||||
BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op
|
||||
BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op
|
||||
BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op
|
||||
BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op
|
||||
|
||||
See benchmark cases at [semver_test.go](semver_test.go)
|
||||
|
||||
|
||||
Motivation
|
||||
-----
|
||||
|
||||
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
|
||||
|
||||
|
||||
Contribution
|
||||
-----
|
||||
|
||||
Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
|
||||
|
||||
|
||||
License
|
||||
-----
|
||||
|
||||
See [LICENSE](LICENSE) file.
|
||||
0 vendor/github.com/blang/semver/v4/json.go → vendor/github.com/blang/semver/json.go (generated, vendored)
17 vendor/github.com/blang/semver/package.json (generated, vendored, Normal file)
@@ -0,0 +1,17 @@
|
||||
{
  "author": "blang",
  "bugs": {
    "URL": "https://github.com/blang/semver/issues",
    "url": "https://github.com/blang/semver/issues"
  },
  "gx": {
    "dvcsimport": "github.com/blang/semver"
  },
  "gxVersion": "0.10.0",
  "language": "go",
  "license": "MIT",
  "name": "semver",
  "releaseCmd": "git commit -a -m \"gx publish $VERSION\"",
  "version": "3.5.1"
}
|
||||
|
||||
2 vendor/github.com/blang/semver/v4/range.go → vendor/github.com/blang/semver/range.go (generated, vendored)
@@ -327,7 +327,7 @@ func expandWildcardVersion(parts [][]string) ([][]string, error) {
|
||||
for _, p := range parts {
|
||||
var newParts []string
|
||||
for _, ap := range p {
|
||||
if strings.Contains(ap, "x") {
|
||||
if strings.Index(ap, "x") != -1 {
|
||||
opStr, vStr, err := splitComparatorVersion(ap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
72 vendor/github.com/blang/semver/v4/semver.go → vendor/github.com/blang/semver/semver.go (generated, vendored)
@@ -26,7 +26,7 @@ type Version struct {
|
||||
Minor uint64
|
||||
Patch uint64
|
||||
Pre []PRVersion
|
||||
Build []string //No Precedence
|
||||
Build []string //No Precendence
|
||||
}
|
||||
|
||||
// Version to string
|
||||
@@ -61,18 +61,6 @@ func (v Version) String() string {
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// FinalizeVersion discards prerelease and build number and only returns
|
||||
// major, minor and patch number.
|
||||
func (v Version) FinalizeVersion() string {
|
||||
b := make([]byte, 0, 5)
|
||||
b = strconv.AppendUint(b, v.Major, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Minor, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Patch, 10)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// Equals checks if v is equal to o.
|
||||
func (v Version) Equals(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
@@ -173,27 +161,6 @@ func (v Version) Compare(o Version) int {
|
||||
|
||||
}
|
||||
|
||||
// IncrementPatch increments the patch version
|
||||
func (v *Version) IncrementPatch() error {
|
||||
v.Patch++
|
||||
return nil
|
||||
}
|
||||
|
||||
// IncrementMinor increments the minor version
|
||||
func (v *Version) IncrementMinor() error {
|
||||
v.Minor++
|
||||
v.Patch = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// IncrementMajor increments the major version
|
||||
func (v *Version) IncrementMajor() error {
|
||||
v.Major++
|
||||
v.Minor = 0
|
||||
v.Patch = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate validates v and returns error in case
|
||||
func (v Version) Validate() error {
|
||||
// Major, Minor, Patch already validated using uint64
|
||||
@@ -222,10 +189,10 @@ func (v Version) Validate() error {
|
||||
}
|
||||
|
||||
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
|
||||
func New(s string) (*Version, error) {
|
||||
func New(s string) (vp *Version, err error) {
|
||||
v, err := Parse(s)
|
||||
vp := &v
|
||||
return vp, err
|
||||
vp = &v
|
||||
return
|
||||
}
|
||||
|
||||
// Make is an alias for Parse, parses version string and returns a validated Version or error
|
||||
@@ -235,25 +202,14 @@ func Make(s string) (Version, error) {
|
||||
|
||||
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
|
||||
// specs to be parsed by this library. It does so by normalizing versions before passing them to
|
||||
// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions
|
||||
// with only major and minor components specified, and removes leading 0s.
|
||||
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
|
||||
// with only major and minor components specified
|
||||
func ParseTolerant(s string) (Version, error) {
|
||||
s = strings.TrimSpace(s)
|
||||
s = strings.TrimPrefix(s, "v")
|
||||
|
||||
// Split into major.minor.(patch+pr+meta)
|
||||
parts := strings.SplitN(s, ".", 3)
|
||||
// Remove leading zeros.
|
||||
for i, p := range parts {
|
||||
if len(p) > 1 {
|
||||
p = strings.TrimLeft(p, "0")
|
||||
if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") {
|
||||
p = "0" + p
|
||||
}
|
||||
parts[i] = p
|
||||
}
|
||||
}
|
||||
// Fill up shortened versions.
|
||||
if len(parts) < 3 {
|
||||
if strings.ContainsAny(parts[len(parts)-1], "+-") {
|
||||
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
|
||||
@@ -261,8 +217,8 @@ func ParseTolerant(s string) (Version, error) {
|
||||
for len(parts) < 3 {
|
||||
parts = append(parts, "0")
|
||||
}
|
||||
s = strings.Join(parts, ".")
|
||||
}
|
||||
s = strings.Join(parts, ".")
|
||||
|
||||
return Parse(s)
|
||||
}
|
||||
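To make the tolerant-parsing rules described above concrete, here is a small sketch against the pre-v4 github.com/blang/semver module this diff vendors; the input string is an arbitrary example.

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// The "v" prefix is trimmed and the missing patch number is padded
	// with 0, so the short form below parses as a full semantic version.
	v, err := semver.ParseTolerant("v1.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(v.String()) // 1.2.0
}
```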
@@ -460,17 +416,3 @@ func NewBuildVersion(s string) (string, error) {
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// FinalizeVersion returns the major, minor and patch number only and discards
|
||||
// prerelease and build number.
|
||||
func FinalizeVersion(s string) (string, error) {
|
||||
v, err := Parse(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
v.Pre = nil
|
||||
v.Build = nil
|
||||
|
||||
finalVer := v.String()
|
||||
return finalVer, nil
|
||||
}
|
||||
0 vendor/github.com/blang/semver/v4/sort.go → vendor/github.com/blang/semver/sort.go (generated, vendored)
2 vendor/github.com/blang/semver/v4/sql.go → vendor/github.com/blang/semver/sql.go (generated, vendored)
@@ -14,7 +14,7 @@ func (v *Version) Scan(src interface{}) (err error) {
|
||||
case []byte:
|
||||
str = string(src)
|
||||
default:
|
||||
return fmt.Errorf("version.Scan: cannot convert %T to string", src)
|
||||
return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
|
||||
}
|
||||
|
||||
if t, err := Parse(str); err == nil {
|
||||
8 vendor/github.com/cespare/xxhash/v2/.travis.yml (generated, vendored, Normal file)
@@ -0,0 +1,8 @@
|
||||
language: go
|
||||
go:
|
||||
- "1.x"
|
||||
- master
|
||||
env:
|
||||
- TAGS=""
|
||||
- TAGS="-tags purego"
|
||||
script: go test $TAGS -v ./...
|
||||
6 vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored)
@@ -1,7 +1,7 @@
|
||||
# xxhash
|
||||
|
||||
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
|
||||
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
|
||||
[](https://godoc.org/github.com/cespare/xxhash)
|
||||
[](https://travis-ci.org/cespare/xxhash)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
@@ -64,6 +64,4 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
|
||||
1 vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored)
@@ -193,6 +193,7 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||
b, d.v4 = consumeUint64(b)
|
||||
b, d.total = consumeUint64(b)
|
||||
copy(d.mem[:], b)
|
||||
b = b[len(d.mem):]
|
||||
d.n = int(d.total % uint64(len(d.mem)))
|
||||
return nil
|
||||
}
|
||||
|
||||
62 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s (generated, vendored)
@@ -6,7 +6,7 @@
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// SI pointer to advance through b
|
||||
// CX pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
@@ -16,39 +16,39 @@
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// DI prime4v
|
||||
// R15 prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in SI.
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (SI), R12 \
|
||||
ADDQ $8, SI \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ DI, acc
|
||||
ADDQ R15, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), DI
|
||||
MOVQ ·prime4v(SB), R15
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), SI
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until SI > BX.
|
||||
// Loop until CX > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
@@ -100,16 +100,16 @@ noBlocks:
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (SI), R8
|
||||
ADDQ $8, SI
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
@@ -117,18 +117,18 @@ wordLoop:
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ DI, AX
|
||||
ADDQ R15, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JG singles
|
||||
|
||||
MOVL (SI), R8
|
||||
ADDQ $4, SI
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
@@ -138,19 +138,19 @@ fourByte:
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (SI), R12
|
||||
ADDQ $1, SI
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), SI
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (SI)(DX*1), BX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from d.
|
||||
@@ -199,7 +199,7 @@ blockLoop:
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ SI, BX
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to d.
|
||||
@@ -208,8 +208,8 @@ blockLoop:
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// The number of bytes written is SI minus the old base pointer.
|
||||
SUBQ b_base+8(FP), SI
|
||||
MOVQ SI, ret+32(FP)
|
||||
// The number of bytes written is CX minus the old base pointer.
|
||||
SUBQ b_base+8(FP), CX
|
||||
MOVQ CX, ret+32(FP)
|
||||
|
||||
RET
|
||||
|
||||
55 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go (generated, vendored)
@@ -6,52 +6,41 @@
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Notes:
|
||||
//
|
||||
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
|
||||
// for some discussion about these unsafe conversions.
|
||||
//
|
||||
// In the future it's possible that compiler optimizations will make these
|
||||
// XxxString functions unnecessary by realizing that calls such as
|
||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||
// If that happens, even if we keep these functions they can be replaced with
|
||||
// the trivial safe code.
|
||||
|
||||
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
||||
// unsafe operations unnecessary: https://golang.org/issue/2205.
|
||||
//
|
||||
// var b []byte
|
||||
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
// bh.Len = len(s)
|
||||
// bh.Cap = len(s)
|
||||
//
|
||||
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
||||
// weight to this sequence of expressions that any function that uses it will
|
||||
// not be inlined. Instead, the functions below use a different unsafe
|
||||
// conversion designed to minimize the inliner weight and allow both to be
|
||||
// inlined. There is also a test (TestInlining) which verifies that these are
|
||||
// inlined.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||
// Both of these wrapper functions still incur function call overhead since they
|
||||
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
|
||||
// for strings to squeeze out a bit more speed. Mid-stack inlining should
|
||||
// eventually fix this.
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
func Sum64String(s string) uint64 {
|
||||
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return Sum64(b)
|
||||
}
|
||||
|
||||
// WriteString adds more data to d. It always returns len(s), nil.
|
||||
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
||||
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
||||
// d.Write always returns len(s), nil.
|
||||
// Ignoring the return output and returning these fixed values buys a
|
||||
// savings of 6 in the inliner's cost model.
|
||||
return len(s), nil
|
||||
}
|
||||
|
||||
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
||||
// of the first two words is the same as the layout of a string.
|
||||
type sliceHeader struct {
|
||||
s string
|
||||
cap int
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return d.Write(b)
|
||||
}
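For context, a brief sketch of these string helpers seen through the public github.com/cespare/xxhash/v2 API; the input strings are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a string without first copying it into a []byte.
	sum := xxhash.Sum64String("hello world")

	// Streaming form: Digest implements hash.Hash64; WriteString feeds
	// string data incrementally and yields the same result.
	d := xxhash.New()
	d.WriteString("hello ")
	d.WriteString("world")

	fmt.Printf("%#x %#x equal=%v\n", sum, d.Sum64(), sum == d.Sum64())
}
```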
|
||||
|
||||
@@ -22,9 +22,8 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
extensions "github.com/googleapis/gnostic/extensions"
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
|
||||
extensions "github.com/google/gnostic/extensions"
|
||||
)
|
||||
|
||||
// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
|
||||
@@ -20,9 +20,8 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/googleapis/gnostic/jsonschema"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/google/gnostic/jsonschema"
|
||||
)
|
||||
|
||||
// compiler helper functions, usually called from generated code
|
||||
@@ -15,7 +15,7 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.18.1
|
||||
// protoc v3.15.5
|
||||
// source: extensions/extension.proto
|
||||
|
||||
package gnostic_extension_v1
|
||||
Some files were not shown because too many files have changed in this diff.